CMakeLists.txt  (+15)
···
  set_option(SDL_LIBICONV "Prefer iconv() from libiconv, if available, over libc version" OFF)
  set_option(SDL_GCC_ATOMICS "Use gcc builtin atomics" ${SDL_GCC_ATOMICS_DEFAULT})
  dep_option(SDL_DBUS "Enable D-Bus support" ON "${UNIX_SYS}" OFF)
+ dep_option(SDL_LIBURING "Enable liburing support" ON "${UNIX_SYS}" OFF)
  dep_option(SDL_DISKAUDIO "Support the disk writer audio driver" ON "SDL_AUDIO" OFF)
  dep_option(SDL_DUMMYAUDIO "Support the dummy audio driver" ON "SDL_AUDIO" OFF)
  dep_option(SDL_DUMMYVIDEO "Use dummy video driver" ON "SDL_VIDEO" OFF)
···
    set(SDL_USE_IME 1)
  endif()

+ if(SDL_LIBURING)
+   pkg_search_module(LIBURING liburing-ffi)
+   find_path(HAVE_LIBURING_H NAMES liburing.h)
+   if(LIBURING_FOUND AND HAVE_LIBURING_H)
+     set(HAVE_LIBURING_LIBURING_H TRUE)
+     sdl_include_directories(PRIVATE SYSTEM ${LIBURING_INCLUDE_DIRS})
+     set(HAVE_LIBURING TRUE)
+   endif()
+ endif()
+
  if((FREEBSD OR NETBSD) AND NOT HAVE_INOTIFY)
    set(LibInotify_PKG_CONFIG_SPEC libinotify)
    pkg_check_modules(PC_LIBINOTIFY IMPORTED_TARGET ${LibInotify_PKG_CONFIG_SPEC})
···
        sdl_include_directories(PRIVATE SYSTEM ${DXVK_NATIVE_INCLUDE_DIRS})
      endif()
    endif()
+   endif()
+
+   if(HAVE_LIBURING_H)
+     sdl_sources("${SDL3_SOURCE_DIR}/src/file/io_uring/SDL_asyncio_liburing.c")
    endif()

    # Always compiled for Linux, unconditionally:
docs/README-linux.md  (+3, -1)
···
      libxkbcommon-dev libdrm-dev libgbm-dev libgl1-mesa-dev libgles2-mesa-dev \
      libegl1-mesa-dev libdbus-1-dev libibus-1.0-dev libudev-dev fcitx-libs-dev

- Ubuntu 22.04+ can also add `libpipewire-0.3-dev libwayland-dev libdecor-0-dev` to that command line.
+ Ubuntu 22.04+ can also add `libpipewire-0.3-dev libwayland-dev libdecor-0-dev liburing-dev` to that command line.

  Fedora 35, all available features enabled:
···
      mesa-libEGL-devel vulkan-devel wayland-devel wayland-protocols-devel \
      libdrm-devel mesa-libgbm-devel libusb-devel libdecor-devel \
      pipewire-jack-audio-connection-kit-devel \
+
+ Fedora 39+ can also add `liburing-devel` to that command line.

  NOTES:
  - The sndio audio target is unavailable on Fedora (but probably not what you
include/build_config/SDL_build_config.h.cmake  (+1)
src/file/SDL_sysasyncio.h  (+4)
···
  // is #defined to 0 instead and implement the SDL_SYS_* functions below in your
  // backend (having them maybe call into the SDL_SYS_*_Generic versions as a
  // fallback if the platform has functionality that isn't always available).
+ #ifdef HAVE_LIBURING_H
+ #define SDL_ASYNCIO_ONLY_HAVE_GENERIC 0
+ #else
  #define SDL_ASYNCIO_ONLY_HAVE_GENERIC 1
+ #endif

  // this entire thing is just juggling doubly-linked lists, so make some helper macros.
  #define LINKED_LIST_DECLARE_FIELDS(type, prefix) \
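
The hunk above documents the backend contract that the rest of this patch plugs into. As a rough sketch only (SDL_SYS_CreateAsyncIOQueue and the _Generic fallback are real names from this patch; BackendAvailable and Backend_CreateAsyncIOQueue are hypothetical placeholders), a platform that defines SDL_ASYNCIO_ONLY_HAVE_GENERIC to 0 supplies entry points shaped like this:

    #if !SDL_ASYNCIO_ONLY_HAVE_GENERIC
    bool SDL_SYS_CreateAsyncIOQueue(SDL_AsyncIOQueue *queue)
    {
        if (BackendAvailable()) {                          // hypothetical runtime check
            return Backend_CreateAsyncIOQueue(queue);      // hypothetical platform implementation
        }
        return SDL_SYS_CreateAsyncIOQueue_Generic(queue);  // generic threadpool fallback
    }
    #endif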
src/file/io_uring/SDL_asyncio_liburing.c  (new file, +517)
···
/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2024 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/

// The Linux backend uses io_uring for asynchronous i/o, and falls back to
// the "generic" threadpool implementation if liburing isn't available or
// fails for some other reason.

#include "SDL_internal.h"

#ifdef HAVE_LIBURING_H

#include "../SDL_sysasyncio.h"

#include <liburing.h>
#include <errno.h>
#include <string.h>   // for strerror()
#include <fcntl.h>    // for open() and the O_* flags
#include <sys/stat.h> // for fstat()

static SDL_InitState liburing_init;

// We could add a whole bootstrap thing like the audio/video/etc subsystems use, but let's keep this simple for now.
static bool (*CreateAsyncIOQueue)(SDL_AsyncIOQueue *queue);
static void (*QuitAsyncIO)(void);
static bool (*AsyncIOFromFile)(const char *file, const char *mode, SDL_AsyncIO *asyncio);

// we never link directly to liburing.
// (this says "-ffi" which sounds like a scripting language binding thing, but the non-ffi version
// is static-inline code we can't lookup with dlsym. This is by design.)
static const char *liburing_library = "liburing-ffi.so.2";
static void *liburing_handle = NULL;

#define SDL_LIBURING_FUNCS \
    SDL_LIBURING_FUNC(int, io_uring_queue_init, (unsigned entries, struct io_uring *ring, unsigned flags)) \
    SDL_LIBURING_FUNC(struct io_uring_probe *, io_uring_get_probe, (void)) \
    SDL_LIBURING_FUNC(void, io_uring_free_probe, (struct io_uring_probe *probe)) \
    SDL_LIBURING_FUNC(int, io_uring_opcode_supported, (const struct io_uring_probe *p, int op)) \
    SDL_LIBURING_FUNC(struct io_uring_sqe *, io_uring_get_sqe, (struct io_uring *ring)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_read, (struct io_uring_sqe *sqe, int fd, void *buf, unsigned nbytes, __u64 offset)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_write, (struct io_uring_sqe *sqe, int fd, const void *buf, unsigned nbytes, __u64 offset)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_close, (struct io_uring_sqe *sqe, int fd)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_fsync, (struct io_uring_sqe *sqe, int fd, unsigned fsync_flags)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_cancel, (struct io_uring_sqe *sqe, void *user_data, int flags)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_timeout, (struct io_uring_sqe *sqe, struct __kernel_timespec *ts, unsigned count, unsigned flags)) \
    SDL_LIBURING_FUNC(void, io_uring_sqe_set_data, (struct io_uring_sqe *sqe, void *data)) \
    SDL_LIBURING_FUNC(void, io_uring_sqe_set_flags, (struct io_uring_sqe *sqe, unsigned flags)) \
    SDL_LIBURING_FUNC(int, io_uring_submit, (struct io_uring *ring)) \
    SDL_LIBURING_FUNC(int, io_uring_peek_cqe, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr)) \
    SDL_LIBURING_FUNC(int, io_uring_wait_cqe, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr)) \
    SDL_LIBURING_FUNC(int, io_uring_wait_cqe_timeout, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr, struct __kernel_timespec *ts)) \
    SDL_LIBURING_FUNC(void, io_uring_cqe_seen, (struct io_uring *ring, struct io_uring_cqe *cqe)) \
    SDL_LIBURING_FUNC(void, io_uring_queue_exit, (struct io_uring *ring))

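// The SDL_LIBURING_FUNCS list above is expanded twice with different
// definitions of SDL_LIBURING_FUNC: once to typedef a function-pointer type
// per entry, then again to declare a matching field in SDL_LibUringFunctions,
// so the symbols to dlsym only have to be listed once.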
#define SDL_LIBURING_FUNC(ret, fn, args) typedef ret (*SDL_fntype_##fn) args;
SDL_LIBURING_FUNCS
#undef SDL_LIBURING_FUNC

typedef struct SDL_LibUringFunctions
{
    #define SDL_LIBURING_FUNC(ret, fn, args) SDL_fntype_##fn fn;
    SDL_LIBURING_FUNCS
    #undef SDL_LIBURING_FUNC
} SDL_LibUringFunctions;

static SDL_LibUringFunctions liburing;

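// sqe_lock serializes submission-side work (grabbing and preparing SQEs),
// cqe_lock serializes completion-side work; the two sides can otherwise
// run concurrently.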
typedef struct LibUringAsyncIOQueueData
{
    SDL_Mutex *sqe_lock;
    SDL_Mutex *cqe_lock;
    struct io_uring ring;
    SDL_AtomicInt num_waiting;
} LibUringAsyncIOQueueData;

static void UnloadLibUringLibrary(void)
{
    if (liburing_handle) {
        SDL_UnloadObject(liburing_handle);
        liburing_handle = NULL;
    }
    SDL_zero(liburing);
}

static bool LoadLibUringSyms(void)
{
    #define SDL_LIBURING_FUNC(ret, fn, args) { \
        liburing.fn = (SDL_fntype_##fn) SDL_LoadFunction(liburing_handle, #fn); \
        if (!liburing.fn) { \
            return false; \
        } \
    }
    SDL_LIBURING_FUNCS
    #undef SDL_LIBURING_FUNC
    return true;
}

// we rely on the presence of liburing to handle io_uring for us. The alternative is making
// direct syscalls into the kernel, which is undesirable. liburing both shields us from this
// and smooths over some kernel version differences, etc.
static bool LoadLibUring(void)
{
    bool result = true;

    if (!liburing_handle) {
        liburing_handle = SDL_LoadObject(liburing_library);
        if (!liburing_handle) {
            result = false;
            // Don't call SDL_SetError(): SDL_LoadObject already did.
        } else {
            result = LoadLibUringSyms();
            if (result) {
                static const int needed_ops[] = {
                    IORING_OP_FSYNC,
                    IORING_OP_TIMEOUT,
                    IORING_OP_CLOSE,
                    IORING_OP_READ,
                    IORING_OP_WRITE,
                    IORING_OP_ASYNC_CANCEL
                };

                struct io_uring_probe *probe = liburing.io_uring_get_probe();
                if (!probe) {
                    result = false;
                } else {
                    for (int i = 0; i < SDL_arraysize(needed_ops); i++) {
                        if (!liburing.io_uring_opcode_supported(probe, needed_ops[i])) {
                            result = false;
                            break;
                        }
                    }
                    liburing.io_uring_free_probe(probe);
                }
            }

            if (!result) {
                UnloadLibUringLibrary();
            }
        }
    }
    return result;
}

static bool liburing_SetError(const char *what, int err)
{
    SDL_assert(err <= 0);
    return SDL_SetError("%s failed: %s", what, strerror(-err));
}

static Sint64 liburing_asyncio_size(void *userdata)
{
    const int fd = (int) (size_t) userdata;
    struct stat statbuf;
    if (fstat(fd, &statbuf) < 0) {
        SDL_SetError("fstat failed: %s", strerror(errno));
        return -1;
    }
    return ((Sint64) statbuf.st_size);
}

// you must hold sqe_lock when calling this!
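// (callers prepare their SQEs and attach user data before calling; this just
// submits everything pending on the ring to the kernel.)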
static bool liburing_asyncioqueue_queue_task(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    const int rc = liburing.io_uring_submit(&queuedata->ring);
    return (rc < 0) ? liburing_SetError("io_uring_submit", rc) : true;
}

static void liburing_asyncioqueue_cancel_task(void *userdata, SDL_AsyncIOTask *task)
{
    SDL_AsyncIOTask *cancel_task = (SDL_AsyncIOTask *) SDL_calloc(1, sizeof (*cancel_task));
    if (!cancel_task) {
        return; // oh well, the task can just finish on its own.
    }

    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    SDL_LockMutex(queuedata->sqe_lock); // queue_task requires sqe_lock to be held.
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        SDL_UnlockMutex(queuedata->sqe_lock);
        SDL_free(cancel_task);
        return; // oh well, the task can just finish on its own.
    }

    cancel_task->app_userdata = task;
    liburing.io_uring_prep_cancel(sqe, task, 0);
    liburing.io_uring_sqe_set_data(sqe, cancel_task);
    liburing_asyncioqueue_queue_task(userdata, task);
    SDL_UnlockMutex(queuedata->sqe_lock);
}

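// Map one completion-queue entry back to its SDL_AsyncIOTask and fill in the
// task's result. A CQE for a cancellation arrives wrapped in a bookkeeping
// task whose app_userdata points at the task that was being cancelled.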
static SDL_AsyncIOTask *ProcessCQE(LibUringAsyncIOQueueData *queuedata, struct io_uring_cqe *cqe)
{
    if (!cqe) {
        return NULL;
    }

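    // (io_uring_cqe_get_data is a static inline in liburing.h, not an
    // exported symbol, so calling it directly doesn't need a dlsym'd entry.)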
    SDL_AsyncIOTask *task = (SDL_AsyncIOTask *) io_uring_cqe_get_data(cqe);
    if (task) { // can be NULL if this was just a wakeup message, etc.
        if (!task->queue) { // We leave `queue` blank to signify this was a task cancellation.
            SDL_AsyncIOTask *cancel_task = task;
            task = (SDL_AsyncIOTask *) cancel_task->app_userdata;
            SDL_free(cancel_task);
            if (cqe->res >= 0) { // cancel was successful?
                task->result = SDL_ASYNCIO_CANCELLED;
            } else {
                task = NULL; // it already finished or was too far along to cancel, so we'll pick up the actual results later.
            }
        } else if (cqe->res < 0) {
            task->result = SDL_ASYNCIO_FAILURE;
            // !!! FIXME: fill in task->error.
        } else {
            if ((task->type == SDL_ASYNCIO_TASK_WRITE) && (((Uint64) cqe->res) < task->requested_size)) {
                task->result = SDL_ASYNCIO_FAILURE; // it's always a failure on short writes.
            } else {
                task->result = SDL_ASYNCIO_COMPLETE;
            }
            if ((task->type == SDL_ASYNCIO_TASK_READ) || (task->type == SDL_ASYNCIO_TASK_WRITE)) {
                task->result_size = (Uint64) cqe->res;
            }
        }
    }

    return task;
}

static SDL_AsyncIOTask *liburing_asyncioqueue_get_results(void *userdata)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;

    // have to hold a lock because otherwise two threads will get the same cqe until we mark it "seen". Copy and mark it right away, then process further.
    SDL_LockMutex(queuedata->cqe_lock);
    struct io_uring_cqe *cqe = NULL;
    const int rc = liburing.io_uring_peek_cqe(&queuedata->ring, &cqe);
    if (rc != 0) {
        SDL_assert(rc == -EAGAIN); // should only fail because nothing is available at the moment.
        SDL_UnlockMutex(queuedata->cqe_lock);
        return NULL;
    }
    struct io_uring_cqe cqe_copy;
    SDL_copyp(&cqe_copy, cqe); // this is only a few bytes.
    liburing.io_uring_cqe_seen(&queuedata->ring, cqe); // let io_uring use this slot again.
    SDL_UnlockMutex(queuedata->cqe_lock);

    return ProcessCQE(queuedata, &cqe_copy);
}

static SDL_AsyncIOTask *liburing_asyncioqueue_wait_results(void *userdata, Sint32 timeoutMS)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    struct io_uring_cqe *cqe = NULL;

    SDL_AddAtomicInt(&queuedata->num_waiting, 1);
    if (timeoutMS < 0) {
        liburing.io_uring_wait_cqe(&queuedata->ring, &cqe);
    } else {
        struct __kernel_timespec ts = { (__kernel_time64_t) timeoutMS / SDL_MS_PER_SECOND, (long long) SDL_MS_TO_NS(timeoutMS % SDL_MS_PER_SECOND) };
        liburing.io_uring_wait_cqe_timeout(&queuedata->ring, &cqe, &ts);
    }
    SDL_AddAtomicInt(&queuedata->num_waiting, -1);

    // (we don't care if the wait failed for any reason, as the upcoming peek_cqe will report valid information. We just wanted the wait operation to block.)

    // each thing that peeks or waits for a completion _gets the same cqe_ until we mark it as seen. So when we wake up from the wait, lock the mutex and
    // then use peek to make sure we have a unique cqe, and other competing threads either get their own or nothing.
    return liburing_asyncioqueue_get_results(userdata); // this just happens to do all those things.
}

static void liburing_asyncioqueue_signal(void *userdata)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    const int num_waiting = SDL_GetAtomicInt(&queuedata->num_waiting);

    SDL_LockMutex(queuedata->sqe_lock);
    for (int i = 0; i < num_waiting; i++) { // !!! FIXME: is there a better way to do this than pushing a zero-timeout request for everything waiting?
        struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
        if (sqe) {
            static struct __kernel_timespec ts; // no wait, just wake a thread as fast as this can land in the completion queue.
            liburing.io_uring_prep_timeout(sqe, &ts, 0, 0);
            liburing.io_uring_sqe_set_data(sqe, NULL);
        }
    }
    liburing.io_uring_submit(&queuedata->ring);
    SDL_UnlockMutex(queuedata->sqe_lock);
}

static void liburing_asyncioqueue_destroy(void *userdata)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    liburing.io_uring_queue_exit(&queuedata->ring);
    SDL_DestroyMutex(queuedata->sqe_lock);
    SDL_DestroyMutex(queuedata->cqe_lock);
    SDL_free(queuedata);
}

static bool SDL_SYS_CreateAsyncIOQueue_liburing(SDL_AsyncIOQueue *queue)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) SDL_calloc(1, sizeof (*queuedata));
    if (!queuedata) {
        return false;
    }

    SDL_SetAtomicInt(&queuedata->num_waiting, 0);

    queuedata->sqe_lock = SDL_CreateMutex();
    if (!queuedata->sqe_lock) {
        SDL_free(queuedata);
        return false;
    }

    queuedata->cqe_lock = SDL_CreateMutex();
    if (!queuedata->cqe_lock) {
        SDL_DestroyMutex(queuedata->sqe_lock);
        SDL_free(queuedata);
        return false;
    }

    // !!! FIXME: no idea how large the queue should be. Is 128 overkill or too small?
    const int rc = liburing.io_uring_queue_init(128, &queuedata->ring, 0);
    if (rc != 0) {
        SDL_DestroyMutex(queuedata->sqe_lock);
        SDL_DestroyMutex(queuedata->cqe_lock);
        SDL_free(queuedata);
        return liburing_SetError("io_uring_queue_init", rc);
    }

    static const SDL_AsyncIOQueueInterface SDL_AsyncIOQueue_liburing = {
        liburing_asyncioqueue_queue_task,
        liburing_asyncioqueue_cancel_task,
        liburing_asyncioqueue_get_results,
        liburing_asyncioqueue_wait_results,
        liburing_asyncioqueue_signal,
        liburing_asyncioqueue_destroy
    };

    SDL_copyp(&queue->iface, &SDL_AsyncIOQueue_liburing);
    queue->userdata = queuedata;
    return true;
}

static bool liburing_asyncio_read(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) task->queue->userdata;

    // !!! FIXME: `unsigned` is likely smaller than requested_size's Uint64. If we overflow it, we could try submitting multiple SQEs
    // !!! FIXME:  and make a note in the task that there are several in sequence.
    if (task->requested_size > ((Uint64) ~((unsigned) 0))) {
        return SDL_SetError("io_uring: i/o task is too large");
    }

    // have to hold a lock because otherwise two threads could get_sqe and submit while one request isn't fully set up.
    SDL_LockMutex(queuedata->sqe_lock);
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        SDL_UnlockMutex(queuedata->sqe_lock);
        return SDL_SetError("io_uring: submission queue is full");
    }

    liburing.io_uring_prep_read(sqe, (int) (size_t) userdata, task->buffer, (unsigned) task->requested_size, task->offset);
    liburing.io_uring_sqe_set_data(sqe, task);

    const bool retval = task->queue->iface.queue_task(task->queue->userdata, task);
    SDL_UnlockMutex(queuedata->sqe_lock);
    return retval;
}

static bool liburing_asyncio_write(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) task->queue->userdata;

    // !!! FIXME: `unsigned` is likely smaller than requested_size's Uint64. If we overflow it, we could try submitting multiple SQEs
    // !!! FIXME:  and make a note in the task that there are several in sequence.
    if (task->requested_size > ((Uint64) ~((unsigned) 0))) {
        return SDL_SetError("io_uring: i/o task is too large");
    }

    // have to hold a lock because otherwise two threads could get_sqe and submit while one request isn't fully set up.
    SDL_LockMutex(queuedata->sqe_lock);
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        SDL_UnlockMutex(queuedata->sqe_lock);
        return SDL_SetError("io_uring: submission queue is full");
    }

    liburing.io_uring_prep_write(sqe, (int) (size_t) userdata, task->buffer, (unsigned) task->requested_size, task->offset);
    liburing.io_uring_sqe_set_data(sqe, task);

    const bool retval = task->queue->iface.queue_task(task->queue->userdata, task);
    SDL_UnlockMutex(queuedata->sqe_lock);
    return retval;
}

static bool liburing_asyncio_close(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) task->queue->userdata;

    // have to hold a lock because otherwise two threads could get_sqe and submit while one request isn't fully set up.
    SDL_LockMutex(queuedata->sqe_lock);
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        SDL_UnlockMutex(queuedata->sqe_lock);
        return SDL_SetError("io_uring: submission queue is full");
    }

    liburing.io_uring_prep_close(sqe, (int) (size_t) userdata);
    liburing.io_uring_sqe_set_data(sqe, task);

    const bool retval = task->queue->iface.queue_task(task->queue->userdata, task);
    SDL_UnlockMutex(queuedata->sqe_lock);
    return retval;
}

static void liburing_asyncio_destroy(void *userdata)
{
    // this is only a Unix file descriptor, should have been closed elsewhere.
}

static int PosixOpenModeFromString(const char *mode)
{
    // this is exactly the set of strings that SDL_AsyncIOFromFile promises will work.
    static const struct { const char *str; int flags; } mappings[] = {
        { "rb", O_RDONLY },
        { "wb", O_WRONLY | O_CREAT | O_TRUNC },
        { "r+b", O_RDWR },
        { "w+b", O_RDWR | O_CREAT | O_TRUNC }
    };

    for (int i = 0; i < SDL_arraysize(mappings); i++) {
        if (SDL_strcmp(mappings[i].str, mode) == 0) {
            return mappings[i].flags;
        }
    }

    SDL_assert(!"Shouldn't have reached this code");
    return 0;
}

static bool SDL_SYS_AsyncIOFromFile_liburing(const char *file, const char *mode, SDL_AsyncIO *asyncio)
{
    const int fd = open(file, PosixOpenModeFromString(mode), 0644);
    if (fd == -1) {
        return SDL_SetError("open failed: %s", strerror(errno));
    }

    static const SDL_AsyncIOInterface SDL_AsyncIOFile_liburing = {
        liburing_asyncio_size,
        liburing_asyncio_read,
        liburing_asyncio_write,
        liburing_asyncio_close,
        liburing_asyncio_destroy
    };

    SDL_copyp(&asyncio->iface, &SDL_AsyncIOFile_liburing);
    asyncio->userdata = (void *) (size_t) fd;
    return true;
}

static void SDL_SYS_QuitAsyncIO_liburing(void)
{
    UnloadLibUringLibrary();
}

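// SDL_ShouldInit returns true for exactly one caller; any simultaneous
// callers block until that caller flags completion via SDL_SetInitialized,
// so the function-pointer setup below is thread-safe.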
static void MaybeInitializeLibUring(void)
{
    if (SDL_ShouldInit(&liburing_init)) {
        if (LoadLibUring()) {
            CreateAsyncIOQueue = SDL_SYS_CreateAsyncIOQueue_liburing;
            QuitAsyncIO = SDL_SYS_QuitAsyncIO_liburing;
            AsyncIOFromFile = SDL_SYS_AsyncIOFromFile_liburing;
        } else { // can't use liburing? Use the "generic" threadpool implementation instead.
            CreateAsyncIOQueue = SDL_SYS_CreateAsyncIOQueue_Generic;
            QuitAsyncIO = SDL_SYS_QuitAsyncIO_Generic;
            AsyncIOFromFile = SDL_SYS_AsyncIOFromFile_Generic;
        }
        SDL_SetInitialized(&liburing_init, true);
    }
}

bool SDL_SYS_CreateAsyncIOQueue(SDL_AsyncIOQueue *queue)
{
    MaybeInitializeLibUring();
    return CreateAsyncIOQueue(queue);
}

bool SDL_SYS_AsyncIOFromFile(const char *file, const char *mode, SDL_AsyncIO *asyncio)
{
    MaybeInitializeLibUring();
    return AsyncIOFromFile(file, mode, asyncio);
}

void SDL_SYS_QuitAsyncIO(void)
{
    if (SDL_ShouldQuit(&liburing_init)) {
        QuitAsyncIO();
        CreateAsyncIOQueue = NULL;
        QuitAsyncIO = NULL;
        AsyncIOFromFile = NULL;
        SDL_SetInitialized(&liburing_init, false);
    }
}

#endif // defined HAVE_LIBURING_H
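
For reference, a minimal consumer of the subsystem this backend serves might look like the sketch below. It assumes the public SDL3 async-I/O API (SDL_CreateAsyncIOQueue, SDL_AsyncIOFromFile, SDL_ReadAsyncIO, SDL_WaitAsyncIOResult, SDL_AsyncIOOutcome) as shipped in SDL_asyncio.h; exact signatures may differ in the revision this patch targets. Whether io_uring or the generic threadpool services the requests is invisible at this level.

    #include <SDL3/SDL.h>

    int main(void)
    {
        SDL_AsyncIOQueue *queue = SDL_CreateAsyncIOQueue();
        SDL_AsyncIO *io = SDL_AsyncIOFromFile("data.bin", "rb");
        static char buf[4096];

        // queue a read of the file's first 4K; completion arrives through the queue.
        SDL_ReadAsyncIO(io, buf, 0, sizeof (buf), queue, NULL);

        SDL_AsyncIOOutcome outcome;
        if (SDL_WaitAsyncIOResult(queue, &outcome, -1)) {  // block until a task completes.
            if (outcome.result == SDL_ASYNCIO_COMPLETE) {
                SDL_Log("read %d bytes", (int) outcome.bytes_transferred);
            }
        }

        SDL_CloseAsyncIO(io, false, queue, NULL);  // closing is itself an async task.
        SDL_DestroyAsyncIOQueue(queue);
        return 0;
    }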