// Monado - The open source OpenXR runtime.
1// Copyright 2019-2024, Collabora, Ltd.
2// Copyright 2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Multi client wrapper compositor.
7 * @author Pete Black <pblack@collabora.com>
8 * @author Jakob Bornecrantz <jakob@collabora.com>
9 * @author Korcan Hussein <korcan.hussein@collabora.com>
10 * @ingroup comp_multi
11 */
12
13#include "xrt/xrt_config_os.h"
14#include "xrt/xrt_session.h"
15
16#include "os/os_time.h"
17#include "os/os_threading.h"
18
19#include "util/u_var.h"
20#include "util/u_misc.h"
21#include "util/u_time.h"
22#include "util/u_wait.h"
23#include "util/u_debug.h"
24#include "util/u_trace_marker.h"
25#include "util/u_distortion_mesh.h"
26
27#ifdef XRT_OS_LINUX
28#include "util/u_linux.h"
29#endif
30
31#include "multi/comp_multi_private.h"
32#include "multi/comp_multi_interface.h"
33
34#include <math.h>
35#include <stdio.h>
36#include <assert.h>
37#include <stdarg.h>
38#include <stdlib.h>
39#include <string.h>
40
41#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
42#include <unistd.h>
43#endif
44
45
46/*
47 *
48 * Render thread.
49 *
50 */
51
52static void
53do_projection_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
54{
55 struct xrt_device *xdev = layer->xdev;
56
57 // Cast away
58 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
59
60 // Do not need to copy the reference, but should verify the pointers for consistency
61 for (uint32_t j = 0; j < data->view_count; j++) {
62 if (layer->xscs[j] == NULL) {
63 U_LOG_E("Invalid swap chain for projection layer #%u!", i);
64 return;
65 }
66 }
67
68 if (xdev == NULL) {
69 U_LOG_E("Invalid xdev for projection layer #%u!", i);
70 return;
71 }
72
73 xrt_comp_layer_projection(xc, xdev, layer->xscs, data);
74}
75
76static void
77do_projection_layer_depth(struct xrt_compositor *xc,
78 struct multi_compositor *mc,
79 struct multi_layer_entry *layer,
80 uint32_t i)
81{
82 struct xrt_device *xdev = layer->xdev;
83
84 struct xrt_swapchain *xsc[XRT_MAX_VIEWS];
85 struct xrt_swapchain *d_xsc[XRT_MAX_VIEWS];
86 // Cast away
87 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
88
89 for (uint32_t j = 0; j < data->view_count; j++) {
90 xsc[j] = layer->xscs[j];
91 d_xsc[j] = layer->xscs[j + data->view_count];
92
93 if (xsc[j] == NULL || d_xsc[j] == NULL) {
94 U_LOG_E("Invalid swap chain for projection layer #%u!", i);
95 return;
96 }
97 }
98
99 if (xdev == NULL) {
100 U_LOG_E("Invalid xdev for projection layer #%u!", i);
101 return;
102 }
103
104
105 xrt_comp_layer_projection_depth(xc, xdev, xsc, d_xsc, data);
106}
107
108static bool
109do_single(struct xrt_compositor *xc,
110 struct multi_compositor *mc,
111 struct multi_layer_entry *layer,
112 uint32_t i,
113 const char *name,
114 struct xrt_device **out_xdev,
115 struct xrt_swapchain **out_xcs,
116 struct xrt_layer_data **out_data)
117{
118 struct xrt_device *xdev = layer->xdev;
119 struct xrt_swapchain *xcs = layer->xscs[0];
120
121 if (xcs == NULL) {
122 U_LOG_E("Invalid swapchain for layer #%u '%s'!", i, name);
123 return false;
124 }
125
126 if (xdev == NULL) {
127 U_LOG_E("Invalid xdev for layer #%u '%s'!", i, name);
128 return false;
129 }
130
131 // Cast away
132 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
133
134 *out_xdev = xdev;
135 *out_xcs = xcs;
136 *out_data = data;
137
138 return true;
139}
140
141static void
142do_quad_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
143{
144 struct xrt_device *xdev = NULL;
145 struct xrt_swapchain *xcs = NULL;
146 struct xrt_layer_data *data = NULL;
147
148 if (!do_single(xc, mc, layer, i, "quad", &xdev, &xcs, &data)) {
149 return;
150 }
151
152 xrt_comp_layer_quad(xc, xdev, xcs, data);
153}
154
155static void
156do_cube_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
157{
158 struct xrt_device *xdev = NULL;
159 struct xrt_swapchain *xcs = NULL;
160 struct xrt_layer_data *data = NULL;
161
162 if (!do_single(xc, mc, layer, i, "cube", &xdev, &xcs, &data)) {
163 return;
164 }
165
166 xrt_comp_layer_cube(xc, xdev, xcs, data);
167}
168
169static void
170do_cylinder_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
171{
172 struct xrt_device *xdev = NULL;
173 struct xrt_swapchain *xcs = NULL;
174 struct xrt_layer_data *data = NULL;
175
176 if (!do_single(xc, mc, layer, i, "cylinder", &xdev, &xcs, &data)) {
177 return;
178 }
179
180 xrt_comp_layer_cylinder(xc, xdev, xcs, data);
181}
182
183static void
184do_equirect1_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
185{
186 struct xrt_device *xdev = NULL;
187 struct xrt_swapchain *xcs = NULL;
188 struct xrt_layer_data *data = NULL;
189
190 if (!do_single(xc, mc, layer, i, "equirect1", &xdev, &xcs, &data)) {
191 return;
192 }
193
194 xrt_comp_layer_equirect1(xc, xdev, xcs, data);
195}
196
197static void
198do_equirect2_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i)
199{
200 struct xrt_device *xdev = NULL;
201 struct xrt_swapchain *xcs = NULL;
202 struct xrt_layer_data *data = NULL;
203
204 if (!do_single(xc, mc, layer, i, "equirect2", &xdev, &xcs, &data)) {
205 return;
206 }
207
208 xrt_comp_layer_equirect2(xc, xdev, xcs, data);
209}
210
211static int
212overlay_sort_func(const void *a, const void *b)
213{
214 struct multi_compositor *mc_a = *(struct multi_compositor **)a;
215 struct multi_compositor *mc_b = *(struct multi_compositor **)b;
216
217 if (mc_a->state.z_order < mc_b->state.z_order) {
218 return -1;
219 }
220
221 if (mc_a->state.z_order > mc_b->state.z_order) {
222 return 1;
223 }
224
225 return 0;
226}
227
228static enum xrt_blend_mode
229find_active_blend_mode(struct multi_compositor **overlay_sorted_clients, size_t size)
230{
231 if (overlay_sorted_clients == NULL)
232 return XRT_BLEND_MODE_OPAQUE;
233
234 const struct multi_compositor *first_visible = NULL;
235 for (size_t k = 0; k < size; ++k) {
236 const struct multi_compositor *mc = overlay_sorted_clients[k];
237 assert(mc != NULL);
238
239 // if a focused client is found just return, "first_visible" has lower priority and can be ignored.
240 if (mc->state.focused) {
241 assert(mc->state.visible);
242 return mc->delivered.data.env_blend_mode;
243 }
244
245 if (first_visible == NULL && mc->state.visible) {
246 first_visible = mc;
247 }
248 }
249 if (first_visible != NULL)
250 return first_visible->delivered.data.env_blend_mode;
251 return XRT_BLEND_MODE_OPAQUE;
252}
253
/*!
 * Gather the delivered layers from every visible client, sorted bottom-to-top
 * by z-order, and submit them to the native compositor between
 * xrt_comp_layer_begin and the caller's xrt_comp_layer_commit.
 *
 * The msc->list_and_timing_lock must be held when calling this function.
 */
static void
transfer_layers_locked(struct multi_system_compositor *msc, int64_t display_time_ns, int64_t system_frame_id)
{
	COMP_TRACE_MARKER();

	struct xrt_compositor *xc = &msc->xcn->base;

	struct multi_compositor *array[MULTI_MAX_CLIENTS] = {0};

	// Timestamp used for latching (and retiring) delivered frames.
	int64_t now_ns = os_monotonic_get_ns();

	size_t count = 0;
	for (size_t k = 0; k < ARRAY_SIZE(array); k++) {
		struct multi_compositor *mc = msc->clients[k];

		// Client slots in the array can be empty.
		if (mc == NULL) {
			continue;
		}

		// Even if it's not shown, make sure that frames are delivered.
		multi_compositor_deliver_any_frames(mc, display_time_ns);

		// None of the data in this slot is valid, don't access it.
		if (!mc->delivered.active) {
			continue;
		}

		// The client isn't visible, do not submit its layers.
		if (!mc->state.visible) {
			// Need to drop the delivered frame as it shouldn't be reused.
			multi_compositor_retire_delivered_locked(mc, now_ns);
			continue;
		}

		// Just in case: visible but inactive sessions are skipped too.
		if (!mc->state.session_active) {
			U_LOG_W("Session is visible but not active.");

			// Need to drop the delivered frame as it shouldn't be reused.
			multi_compositor_retire_delivered_locked(mc, now_ns);
			continue;
		}

		// The list_and_timing_lock is held when calling this function.
		multi_compositor_latch_frame_locked(mc, now_ns, system_frame_id);

		array[count++] = msc->clients[k];
	}

	// Sort the stack array bottom-to-top by z-order.
	qsort(array, count, sizeof(struct multi_compositor *), overlay_sort_func);

	// Blend mode comes from the focused client, or the first visible one.
	const enum xrt_blend_mode blend_mode = find_active_blend_mode(array, count);

	const struct xrt_layer_frame_data data = {
	    .frame_id = system_frame_id,
	    .display_time_ns = display_time_ns,
	    .env_blend_mode = blend_mode,
	};
	xrt_comp_layer_begin(xc, &data);

	// Copy all active layers.
	for (size_t k = 0; k < count; k++) {
		struct multi_compositor *mc = array[k];
		assert(mc != NULL);

		for (uint32_t i = 0; i < mc->delivered.layer_count; i++) {
			struct multi_layer_entry *layer = &mc->delivered.layers[i];

			switch (layer->data.type) {
			case XRT_LAYER_PROJECTION: do_projection_layer(xc, mc, layer, i); break;
			case XRT_LAYER_PROJECTION_DEPTH: do_projection_layer_depth(xc, mc, layer, i); break;
			case XRT_LAYER_QUAD: do_quad_layer(xc, mc, layer, i); break;
			case XRT_LAYER_CUBE: do_cube_layer(xc, mc, layer, i); break;
			case XRT_LAYER_CYLINDER: do_cylinder_layer(xc, mc, layer, i); break;
			case XRT_LAYER_EQUIRECT1: do_equirect1_layer(xc, mc, layer, i); break;
			case XRT_LAYER_EQUIRECT2: do_equirect2_layer(xc, mc, layer, i); break;
			default: U_LOG_E("Unhandled layer type '%i'!", layer->data.type); break;
			}
		}
	}
}
339
340static void
341broadcast_timings_to_clients(struct multi_system_compositor *msc, int64_t predicted_display_time_ns)
342{
343 COMP_TRACE_MARKER();
344
345 os_mutex_lock(&msc->list_and_timing_lock);
346
347 for (size_t i = 0; i < ARRAY_SIZE(msc->clients); i++) {
348 struct multi_compositor *mc = msc->clients[i];
349 if (mc == NULL) {
350 continue;
351 }
352
353 os_mutex_lock(&mc->slot_lock);
354 mc->slot_next_frame_display = predicted_display_time_ns;
355 os_mutex_unlock(&mc->slot_lock);
356 }
357
358 os_mutex_unlock(&msc->list_and_timing_lock);
359}
360
361static void
362broadcast_timings_to_pacers(struct multi_system_compositor *msc,
363 int64_t predicted_display_time_ns,
364 int64_t predicted_display_period_ns,
365 int64_t diff_ns)
366{
367 COMP_TRACE_MARKER();
368
369 os_mutex_lock(&msc->list_and_timing_lock);
370
371 for (size_t i = 0; i < ARRAY_SIZE(msc->clients); i++) {
372 struct multi_compositor *mc = msc->clients[i];
373 if (mc == NULL) {
374 continue;
375 }
376
377 u_pa_info( //
378 mc->upa, //
379 predicted_display_time_ns, //
380 predicted_display_period_ns, //
381 diff_ns); //
382
383 os_mutex_lock(&mc->slot_lock);
384 mc->slot_next_frame_display = predicted_display_time_ns;
385 os_mutex_unlock(&mc->slot_lock);
386 }
387
388 msc->last_timings.predicted_display_time_ns = predicted_display_time_ns;
389 msc->last_timings.predicted_display_period_ns = predicted_display_period_ns;
390 msc->last_timings.diff_ns = diff_ns;
391
392 os_mutex_unlock(&msc->list_and_timing_lock);
393}
394
395static void
396wait_frame(struct os_precise_sleeper *sleeper, struct xrt_compositor *xc, int64_t frame_id, int64_t wake_up_time_ns)
397{
398 COMP_TRACE_MARKER();
399
400 // Wait until the given wake up time.
401 u_wait_until(sleeper, wake_up_time_ns);
402
403 int64_t now_ns = os_monotonic_get_ns();
404
405 // Signal that we woke up.
406 xrt_comp_mark_frame(xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, now_ns);
407}
408
/*!
 * Advance the native session state machine one step, beginning or ending the
 * native compositor session depending on the number of active app sessions.
 *
 * The msc->oth (thread helper) lock must be held when calling this function.
 */
static void
update_session_state_locked(struct multi_system_compositor *msc)
{
	struct xrt_compositor *xc = &msc->xcn->base;

	//! @todo Make this not be hardcoded.
	const struct xrt_begin_session_info begin_session_info = {
	    .view_type = XRT_VIEW_TYPE_STEREO,
	    .ext_hand_tracking_enabled = false,
	    .ext_hand_tracking_data_source_enabled = false,
	    .ext_eye_gaze_interaction_enabled = false,
	    .ext_hand_interaction_enabled = false,
	    .htc_facial_tracking_enabled = false,
	    .fb_body_tracking_enabled = false,
	    .fb_face_tracking2_enabled = false,
	    .meta_body_tracking_full_body_enabled = false,
	    .meta_body_tracking_calibration_enabled = false,
	    .android_face_tracking_enabled = false,
	};

	switch (msc->sessions.state) {
	case MULTI_SYSTEM_STATE_INIT_WARM_START:
		// Produce at least one frame on init: begin the native session,
		// then fall into STOPPING so it ends unless a client shows up.
		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPING;
		xrt_comp_begin_session(xc, &begin_session_info);
		U_LOG_I("Doing warm start, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_STOPPED:
		// Stay stopped until at least one app session becomes active.
		if (msc->sessions.active_count == 0) {
			break;
		}

		msc->sessions.state = MULTI_SYSTEM_STATE_RUNNING;
		xrt_comp_begin_session(xc, &begin_session_info);
		U_LOG_I("Started native session, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_RUNNING:
		// Keep running while any app session remains active.
		if (msc->sessions.active_count > 0) {
			break;
		}

		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPING;
		U_LOG_D("Stopping native session, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_STOPPING:
		// Just in case a session became active again before we stopped.
		if (msc->sessions.active_count > 0) {
			msc->sessions.state = MULTI_SYSTEM_STATE_RUNNING;
			U_LOG_D("Restarting native session, %u active app session(s).",
			        (uint32_t)msc->sessions.active_count);
			break;
		}

		// Nothing active: end the native session and go to sleep.
		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPED;
		xrt_comp_end_session(xc);
		U_LOG_I("Stopped native session, %u active app session(s).", (uint32_t)msc->sessions.active_count);
		break;

	case MULTI_SYSTEM_STATE_INVALID:
	default:
		// Should never happen; recover by driving towards STOPPING.
		U_LOG_E("Got invalid state %u", msc->sessions.state);
		msc->sessions.state = MULTI_SYSTEM_STATE_STOPPING;
		assert(false);
	}
}
477
/*!
 * Main loop of the multi client module's render thread: paces frames with the
 * native compositor, broadcasts timings to clients and their pacers, and
 * transfers the clients' delivered layers each frame.
 *
 * Runs until the thread helper is told to stop; sleeps while the session
 * state machine is in the stopped state.
 *
 * @return 0, returned through thread_func as the thread's exit value.
 */
static int
multi_main_loop(struct multi_system_compositor *msc)
{
	U_TRACE_SET_THREAD_NAME("Multi Client Module");
	os_thread_helper_name(&msc->oth, "Multi Client Module");

#ifdef XRT_OS_LINUX
	// Try to raise priority of this thread.
	u_linux_try_to_set_realtime_priority_on_thread(U_LOGGING_INFO, "Multi Client Module");
#endif

	struct xrt_compositor *xc = &msc->xcn->base;

	// For wait frame.
	struct os_precise_sleeper sleeper = {0};
	os_precise_sleeper_init(&sleeper);

	// Protect the thread state and the sessions state.
	os_thread_helper_lock(&msc->oth);

	// The oth lock is held at the top of every iteration.
	while (os_thread_helper_is_running_locked(&msc->oth)) {

		// Updates msc->sessions.state depending on active client sessions.
		update_session_state_locked(msc);

		if (msc->sessions.state == MULTI_SYSTEM_STATE_STOPPED) {
			// Sleep and wait to be signaled (releases the lock while waiting).
			os_thread_helper_wait_locked(&msc->oth);

			// Loop back to running and session check.
			continue;
		}

		// Unlock the thread after the checks have been done,
		// the frame work below must not hold the oth lock.
		os_thread_helper_unlock(&msc->oth);

		int64_t frame_id = -1;
		int64_t wake_up_time_ns = 0;
		int64_t predicted_gpu_time_ns = 0;
		int64_t predicted_display_time_ns = 0;
		int64_t predicted_display_period_ns = 0;

		// Get the information for the next frame.
		xrt_comp_predict_frame( //
		    xc,                 //
		    &frame_id,          //
		    &wake_up_time_ns,   //
		    &predicted_gpu_time_ns,         //
		    &predicted_display_time_ns,     //
		    &predicted_display_period_ns);  //

		// Do this as soon as we have the new display time.
		broadcast_timings_to_clients(msc, predicted_display_time_ns);

		// Now we can wait.
		wait_frame(&sleeper, xc, frame_id, wake_up_time_ns);

		int64_t now_ns = os_monotonic_get_ns();
		int64_t diff_ns = predicted_display_time_ns - now_ns;

		// Now we know the diff, broadcast to pacers.
		broadcast_timings_to_pacers(msc, predicted_display_time_ns, predicted_display_period_ns, diff_ns);

		xrt_comp_begin_frame(xc, frame_id);

		// Make sure that the clients don't go away while we transfer layers.
		os_mutex_lock(&msc->list_and_timing_lock);
		transfer_layers_locked(msc, predicted_display_time_ns, frame_id);
		os_mutex_unlock(&msc->list_and_timing_lock);

		xrt_comp_layer_commit(xc, XRT_GRAPHICS_SYNC_HANDLE_INVALID);

		// Re-lock the thread for check in while statement.
		os_thread_helper_lock(&msc->oth);
	}

	// Clean up the sessions state; ensure the native session has ended.
	switch (msc->sessions.state) {
	case MULTI_SYSTEM_STATE_RUNNING:
	case MULTI_SYSTEM_STATE_STOPPING:
		U_LOG_I("Stopped native session, shutting down.");
		xrt_comp_end_session(xc);
		break;
	case MULTI_SYSTEM_STATE_STOPPED: U_LOG_I("Already stopped, nothing to clean up."); break;
	case MULTI_SYSTEM_STATE_INIT_WARM_START:
		U_LOG_I("Cleaning up from warm start state.");
		xrt_comp_end_session(xc);
		break;
	case MULTI_SYSTEM_STATE_INVALID:
		U_LOG_W("Cleaning up from invalid state.");
		// Best effort cleanup
		xrt_comp_end_session(xc);
		break;
	default: U_LOG_E("Unknown session state during cleanup: %d", msc->sessions.state); assert(false);
	}

	os_thread_helper_unlock(&msc->oth);

	os_precise_sleeper_deinit(&sleeper);

	return 0;
}
580
static void *
thread_func(void *ptr)
{
	// Trampoline: unwrap the argument and smuggle the int result
	// through the pthread-style void pointer return.
	struct multi_system_compositor *msc = (struct multi_system_compositor *)ptr;
	int ret = multi_main_loop(msc);
	return (void *)(intptr_t)ret;
}
586
587
588/*
589 *
590 * System multi compositor functions.
591 *
592 */
593
594static xrt_result_t
595system_compositor_set_state(
596 struct xrt_system_compositor *xsc, struct xrt_compositor *xc, bool visible, bool focused, int64_t timestamp_ns)
597{
598 struct multi_system_compositor *msc = multi_system_compositor(xsc);
599 struct multi_compositor *mc = multi_compositor(xc);
600 (void)msc;
601
602 //! @todo Locking?
603 if (mc->state.visible != visible || mc->state.focused != focused) {
604 mc->state.visible = visible;
605 mc->state.focused = focused;
606
607 union xrt_session_event xse = XRT_STRUCT_INIT;
608 xse.type = XRT_SESSION_EVENT_STATE_CHANGE;
609 xse.state.visible = visible;
610 xse.state.focused = focused;
611 xse.state.timestamp_ns = timestamp_ns;
612
613 return multi_compositor_push_event(mc, &xse);
614 }
615
616 return XRT_SUCCESS;
617}
618
619static xrt_result_t
620system_compositor_set_z_order(struct xrt_system_compositor *xsc, struct xrt_compositor *xc, int64_t z_order)
621{
622 struct multi_system_compositor *msc = multi_system_compositor(xsc);
623 struct multi_compositor *mc = multi_compositor(xc);
624 (void)msc;
625
626 //! @todo Locking?
627 mc->state.z_order = z_order;
628
629 return XRT_SUCCESS;
630}
631
632static xrt_result_t
633system_compositor_set_main_app_visibility(struct xrt_system_compositor *xsc, struct xrt_compositor *xc, bool visible)
634{
635 struct multi_system_compositor *msc = multi_system_compositor(xsc);
636 struct multi_compositor *mc = multi_compositor(xc);
637 (void)msc;
638
639 union xrt_session_event xse = XRT_STRUCT_INIT;
640 xse.type = XRT_SESSION_EVENT_OVERLAY_CHANGE;
641 xse.overlay.visible = visible;
642
643 return multi_compositor_push_event(mc, &xse);
644}
645
646static xrt_result_t
647system_compositor_notify_loss_pending(struct xrt_system_compositor *xsc,
648 struct xrt_compositor *xc,
649 int64_t loss_time_ns)
650{
651 struct multi_system_compositor *msc = multi_system_compositor(xsc);
652 struct multi_compositor *mc = multi_compositor(xc);
653 (void)msc;
654
655 union xrt_session_event xse = XRT_STRUCT_INIT;
656 xse.type = XRT_SESSION_EVENT_LOSS_PENDING;
657 xse.loss_pending.loss_time_ns = loss_time_ns;
658
659 return multi_compositor_push_event(mc, &xse);
660}
661
662static xrt_result_t
663system_compositor_notify_lost(struct xrt_system_compositor *xsc, struct xrt_compositor *xc)
664{
665 struct multi_system_compositor *msc = multi_system_compositor(xsc);
666 struct multi_compositor *mc = multi_compositor(xc);
667 (void)msc;
668
669 union xrt_session_event xse = XRT_STRUCT_INIT;
670 xse.type = XRT_SESSION_EVENT_LOST;
671
672 return multi_compositor_push_event(mc, &xse);
673}
674
675static xrt_result_t
676system_compositor_notify_display_refresh_changed(struct xrt_system_compositor *xsc,
677 struct xrt_compositor *xc,
678 float from_display_refresh_rate_hz,
679 float to_display_refresh_rate_hz)
680{
681 struct multi_system_compositor *msc = multi_system_compositor(xsc);
682 struct multi_compositor *mc = multi_compositor(xc);
683 (void)msc;
684
685 union xrt_session_event xse = XRT_STRUCT_INIT;
686 xse.type = XRT_SESSION_EVENT_DISPLAY_REFRESH_RATE_CHANGE;
687 xse.display.from_display_refresh_rate_hz = from_display_refresh_rate_hz;
688 xse.display.to_display_refresh_rate_hz = to_display_refresh_rate_hz;
689
690 return multi_compositor_push_event(mc, &xse);
691}
692
693
694/*
695 *
696 * System compositor functions.
697 *
698 */
699
700static xrt_result_t
701system_compositor_create_native_compositor(struct xrt_system_compositor *xsc,
702 const struct xrt_session_info *xsi,
703 struct xrt_session_event_sink *xses,
704 struct xrt_compositor_native **out_xcn)
705{
706 struct multi_system_compositor *msc = multi_system_compositor(xsc);
707
708 return multi_compositor_create(msc, xsi, xses, out_xcn);
709}
710
/*!
 * Tear down the multi system compositor.
 *
 * Order matters: the render thread is stopped and joined first so nothing
 * touches the pacing factory or native compositor while they are destroyed.
 */
static void
system_compositor_destroy(struct xrt_system_compositor *xsc)
{
	struct multi_system_compositor *msc = multi_system_compositor(xsc);

	// Destroy the render thread first, destroy also stops the thread.
	os_thread_helper_destroy(&msc->oth);

	// Destroy the app pacing factory.
	u_paf_destroy(&msc->upaf);

	// Destroy the wrapped native compositor.
	xrt_comp_native_destroy(&msc->xcn);

	os_mutex_destroy(&msc->list_and_timing_lock);

	free(msc);
}
727
728
729/*
730 *
731 * 'Exported' functions.
732 *
733 */
734
735void
736multi_system_compositor_update_session_status(struct multi_system_compositor *msc, bool active)
737{
738 os_thread_helper_lock(&msc->oth);
739
740 if (active) {
741 assert(msc->sessions.active_count < UINT32_MAX);
742 msc->sessions.active_count++;
743
744 // If the thread is sleeping wake it up.
745 os_thread_helper_signal_locked(&msc->oth);
746 } else {
747 assert(msc->sessions.active_count > 0);
748 msc->sessions.active_count--;
749 }
750
751 os_thread_helper_unlock(&msc->oth);
752}
753
754xrt_result_t
755comp_multi_create_system_compositor(struct xrt_compositor_native *xcn,
756 struct u_pacing_app_factory *upaf,
757 const struct xrt_system_compositor_info *xsci,
758 bool do_warm_start,
759 struct xrt_system_compositor **out_xsysc)
760{
761 struct multi_system_compositor *msc = U_TYPED_CALLOC(struct multi_system_compositor);
762 msc->base.create_native_compositor = system_compositor_create_native_compositor;
763 msc->base.destroy = system_compositor_destroy;
764 msc->xmcc.set_state = system_compositor_set_state;
765 msc->xmcc.set_z_order = system_compositor_set_z_order;
766 msc->xmcc.set_main_app_visibility = system_compositor_set_main_app_visibility;
767 msc->xmcc.notify_loss_pending = system_compositor_notify_loss_pending;
768 msc->xmcc.notify_lost = system_compositor_notify_lost;
769 msc->xmcc.notify_display_refresh_changed = system_compositor_notify_display_refresh_changed;
770 msc->base.xmcc = &msc->xmcc;
771 msc->base.info = *xsci;
772 msc->upaf = upaf;
773 msc->xcn = xcn;
774 msc->sessions.active_count = 0;
775 msc->sessions.state = do_warm_start ? MULTI_SYSTEM_STATE_INIT_WARM_START : MULTI_SYSTEM_STATE_STOPPED;
776
777 os_mutex_init(&msc->list_and_timing_lock);
778
779 //! @todo Make the clients not go from IDLE to READY before we have completed a first frame.
780 // Make sure there is at least some sort of valid frame data here.
781 msc->last_timings.predicted_display_time_ns = os_monotonic_get_ns(); // As good as any time.
782 msc->last_timings.predicted_display_period_ns = U_TIME_1MS_IN_NS * 16; // Just a wild guess.
783 msc->last_timings.diff_ns = U_TIME_1MS_IN_NS * 5; // Make sure it's not zero at least.
784
785 int ret = os_thread_helper_init(&msc->oth);
786 if (ret < 0) {
787 return XRT_ERROR_THREADING_INIT_FAILURE;
788 }
789
790 os_thread_helper_start(&msc->oth, thread_func, msc);
791
792 *out_xsysc = &msc->base;
793
794 return XRT_SUCCESS;
795}