// Monado - The open source OpenXR runtime.
1// Copyright 2020-2024, Collabora, Ltd.
2// Copyright 2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
4/*!
5 * @file
6 * @brief Handling functions called from generated dispatch function.
7 * @author Pete Black <pblack@collabora.com>
8 * @author Jakob Bornecrantz <jakob@collabora.com>
9 * @author Korcan Hussein <korcan.hussein@collabora.com>
10 * @ingroup ipc_server
11 */
12
13#include "util/u_misc.h"
14#include "util/u_handles.h"
15#include "util/u_pretty_print.h"
16#include "util/u_visibility_mask.h"
17#include "util/u_trace_marker.h"
18
19#include "server/ipc_server.h"
20#include "ipc_server_generated.h"
21#include "xrt/xrt_device.h"
22#include "xrt/xrt_results.h"
23
24#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
25#include <unistd.h>
26#endif
27
28
29/*
30 *
31 * Helper functions.
32 *
33 */
34
35static xrt_result_t
36validate_device_id(volatile struct ipc_client_state *ics, int64_t device_id, struct xrt_device **out_device)
37{
38 if (device_id >= XRT_SYSTEM_MAX_DEVICES) {
39 IPC_ERROR(ics->server, "Invalid device ID (device_id >= XRT_SYSTEM_MAX_DEVICES)!");
40 return XRT_ERROR_IPC_FAILURE;
41 }
42
43 struct xrt_device *xdev = ics->server->idevs[device_id].xdev;
44 if (xdev == NULL) {
45 IPC_ERROR(ics->server, "Invalid device ID (xdev is NULL)!");
46 return XRT_ERROR_IPC_FAILURE;
47 }
48
49 *out_device = xdev;
50
51 return XRT_SUCCESS;
52}
53
/*!
 * Look up the @ref xrt_device for @p device_id into @p out_device, or
 * `return` the error code from the *calling* function on a bad id.
 *
 * NOTE(review): the macro returns an xrt_result_t, so it is only safe to use
 * in functions whose return type is xrt_result_t (or compatible) — in a
 * bool-returning function a failure code would silently convert to `true`.
 */
#define GET_XDEV_OR_RETURN(ics, device_id, out_device)                                                                 \
	do {                                                                                                           \
		xrt_result_t res = validate_device_id(ics, device_id, &out_device);                                    \
		if (res != XRT_SUCCESS) {                                                                              \
			return res;                                                                                    \
		}                                                                                                      \
	} while (0)
61
62
63static xrt_result_t
64validate_origin_id(volatile struct ipc_client_state *ics, int64_t origin_id, struct xrt_tracking_origin **out_xtrack)
65{
66 if (origin_id >= XRT_SYSTEM_MAX_DEVICES) {
67 IPC_ERROR(ics->server, "Invalid origin ID (origin_id >= XRT_SYSTEM_MAX_DEVICES)!");
68 return XRT_ERROR_IPC_FAILURE;
69 }
70
71 struct xrt_tracking_origin *xtrack = ics->server->xtracks[origin_id];
72 if (xtrack == NULL) {
73 IPC_ERROR(ics->server, "Invalid origin ID (xtrack is NULL)!");
74 return XRT_ERROR_IPC_FAILURE;
75 }
76
77 *out_xtrack = xtrack;
78
79 return XRT_SUCCESS;
80}
81
82static xrt_result_t
83validate_swapchain_state(volatile struct ipc_client_state *ics, uint32_t *out_index)
84{
85 // Our handle is just the index for now.
86 uint32_t index = 0;
87 for (; index < IPC_MAX_CLIENT_SWAPCHAINS; index++) {
88 if (!ics->swapchain_data[index].active) {
89 break;
90 }
91 }
92
93 if (index >= IPC_MAX_CLIENT_SWAPCHAINS) {
94 IPC_ERROR(ics->server, "Too many swapchains!");
95 return XRT_ERROR_IPC_FAILURE;
96 }
97
98 *out_index = index;
99
100 return XRT_SUCCESS;
101}
102
103static void
104set_swapchain_info(volatile struct ipc_client_state *ics,
105 uint32_t index,
106 const struct xrt_swapchain_create_info *info,
107 struct xrt_swapchain *xsc)
108{
109 ics->xscs[index] = xsc;
110 ics->swapchain_data[index].active = true;
111 ics->swapchain_data[index].width = info->width;
112 ics->swapchain_data[index].height = info->height;
113 ics->swapchain_data[index].format = info->format;
114 ics->swapchain_data[index].image_count = xsc->image_count;
115}
116
117static xrt_result_t
118validate_reference_space_type(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
119{
120 if ((uint32_t)type >= XRT_SPACE_REFERENCE_TYPE_COUNT) {
121 IPC_ERROR(ics->server, "Invalid reference space type %u", type);
122 return XRT_ERROR_IPC_FAILURE;
123 }
124
125 return XRT_SUCCESS;
126}
127
128static xrt_result_t
129validate_device_feature_type(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
130{
131 if ((uint32_t)type >= XRT_DEVICE_FEATURE_MAX_ENUM) {
132 IPC_ERROR(ics->server, "Invalid device feature type %u", type);
133 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
134 }
135
136 return XRT_SUCCESS;
137}
138
139
140static xrt_result_t
141validate_space_id(volatile struct ipc_client_state *ics, int64_t space_id, struct xrt_space **out_xspc)
142{
143 if (space_id < 0) {
144 return XRT_ERROR_IPC_FAILURE;
145 }
146
147 if (space_id >= IPC_MAX_CLIENT_SPACES) {
148 return XRT_ERROR_IPC_FAILURE;
149 }
150
151 if (ics->xspcs[space_id] == NULL) {
152 return XRT_ERROR_IPC_FAILURE;
153 }
154
155 *out_xspc = (struct xrt_space *)ics->xspcs[space_id];
156
157 return XRT_SUCCESS;
158}
159
160static xrt_result_t
161get_new_space_id(volatile struct ipc_client_state *ics, uint32_t *out_id)
162{
163 // Our handle is just the index for now.
164 uint32_t index = 0;
165 for (; index < IPC_MAX_CLIENT_SPACES; index++) {
166 if (ics->xspcs[index] == NULL) {
167 break;
168 }
169 }
170
171 if (index >= IPC_MAX_CLIENT_SPACES) {
172 IPC_ERROR(ics->server, "Too many spaces!");
173 return XRT_ERROR_IPC_FAILURE;
174 }
175
176 *out_id = index;
177
178 return XRT_SUCCESS;
179}
180
181static xrt_result_t
182track_space(volatile struct ipc_client_state *ics, struct xrt_space *xs, uint32_t *out_id)
183{
184 uint32_t id = UINT32_MAX;
185 xrt_result_t xret = get_new_space_id(ics, &id);
186 if (xret != XRT_SUCCESS) {
187 return xret;
188 }
189
190 // Remove volatile
191 struct xrt_space **xs_ptr = (struct xrt_space **)&ics->xspcs[id];
192 xrt_space_reference(xs_ptr, xs);
193
194 *out_id = id;
195
196 return XRT_SUCCESS;
197}
198
199
/*!
 * Reserve slots for this client's local and local-floor spaces.
 *
 * Performs four scans: two over the space overseer's global
 * localspace/localfloorspace arrays (recording the free indices on @p ics as
 * local_space_overseer_index / local_floor_space_overseer_index) and two over
 * the client's own xspcs table (producing the client-visible ids).
 *
 * NOTE(review): the slots are only *recorded*, not filled, here — the caller
 * (create_localspace) is expected to populate them before another call can
 * observe the same free indices; confirm callers hold the appropriate lock.
 *
 * @param out_local_id       Client-table index reserved for the local space.
 * @param out_local_floor_id Client-table index reserved for the local floor space.
 * @return XRT_SUCCESS, or XRT_ERROR_IPC_FAILURE if any table is full.
 */
static xrt_result_t
get_new_localspace_id(volatile struct ipc_client_state *ics, uint32_t *out_local_id, uint32_t *out_local_floor_id)
{
	// Our handle is just the index for now.
	// Scan 1: free slot in the overseer's localspace array.
	uint32_t index = 0;
	for (; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->server->xso->localspace[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many localspaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_space_overseer_index = index;

	// Scan 2: free slot in the client's own space table for the local space.
	index = 0;
	for (; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->xspcs[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many spaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_space_index = index;
	*out_local_id = index;

	// Scan 3: free slot in the overseer's localfloorspace array.
	for (index = 0; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->server->xso->localfloorspace[index] == NULL) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many localfloorspaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_floor_space_overseer_index = index;

	// Scan 4: free slot in the client's table for the local floor space.
	// The local space slot from scan 2 is still empty, so skip it here.
	for (index = 0; index < IPC_MAX_CLIENT_SPACES; index++) {
		if (ics->xspcs[index] == NULL && index != ics->local_space_index) {
			break;
		}
	}

	if (index >= IPC_MAX_CLIENT_SPACES) {
		IPC_ERROR(ics->server, "Too many spaces!");
		return XRT_ERROR_IPC_FAILURE;
	}

	ics->local_floor_space_index = index;
	*out_local_floor_id = index;

	return XRT_SUCCESS;
}
261
/*!
 * Create this client's local and local-floor spaces via the space overseer
 * and track them in the client's space table.
 *
 * Reserves the overseer and client slots first (get_new_localspace_id), asks
 * the overseer to create the spaces directly into its own slots, then takes
 * client-side references to both.
 *
 * @param out_local_id       Client id of the new local space.
 * @param out_local_floor_id Client id of the new local floor space.
 * @return XRT_SUCCESS or a propagated error.
 */
static xrt_result_t
create_localspace(volatile struct ipc_client_state *ics, uint32_t *out_local_id, uint32_t *out_local_floor_id)
{
	uint32_t local_id = UINT32_MAX;
	uint32_t local_floor_id = UINT32_MAX;
	xrt_result_t xret = get_new_localspace_id(ics, &local_id, &local_floor_id);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	struct xrt_space_overseer *xso = ics->server->xso;
	// Cast away volatile on the client's slots.
	struct xrt_space **xslocal_ptr = (struct xrt_space **)&ics->xspcs[local_id];
	struct xrt_space **xslocalfloor_ptr = (struct xrt_space **)&ics->xspcs[local_floor_id];

	// The overseer writes the new spaces straight into its reserved slots.
	xret = xrt_space_overseer_create_local_space(xso, &xso->localspace[ics->local_space_overseer_index],
	                                             &xso->localfloorspace[ics->local_floor_space_overseer_index]);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	// Take client-side references so the ids stay valid for this client.
	xrt_space_reference(xslocal_ptr, xso->localspace[ics->local_space_overseer_index]);
	xrt_space_reference(xslocalfloor_ptr, xso->localfloorspace[ics->local_floor_space_overseer_index]);
	*out_local_id = local_id;
	*out_local_floor_id = local_floor_id;

	return XRT_SUCCESS;
}
288
289XRT_MAYBE_UNUSED xrt_result_t
290get_new_future_id(volatile struct ipc_client_state *ics, uint32_t *out_id)
291{
292 // Our handle is just the index for now.
293 uint32_t index = 0;
294 for (; index < IPC_MAX_CLIENT_FUTURES; ++index) {
295 if (ics->xfts[index] == NULL) {
296 break;
297 }
298 }
299
300 if (index >= IPC_MAX_CLIENT_FUTURES) {
301 IPC_ERROR(ics->server, "Too many futures!");
302 return XRT_ERROR_IPC_FAILURE;
303 }
304
305 *out_id = index;
306
307 return XRT_SUCCESS;
308}
309
310static inline xrt_result_t
311validate_future_id(volatile struct ipc_client_state *ics, uint32_t future_id, struct xrt_future **out_xft)
312{
313 if (future_id >= IPC_MAX_CLIENT_FUTURES) {
314 return XRT_ERROR_IPC_FAILURE;
315 }
316
317 if (ics->xfts[future_id] == NULL) {
318 return XRT_ERROR_IPC_FAILURE;
319 }
320
321 *out_xft = (struct xrt_future *)ics->xfts[future_id];
322 return (*out_xft != NULL) ? XRT_SUCCESS : XRT_ERROR_ALLOCATION;
323}
324
325static inline xrt_result_t
326release_future(volatile struct ipc_client_state *ics, uint32_t future_id)
327{
328 struct xrt_future *xft = NULL;
329 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
330 if (xret != XRT_SUCCESS) {
331 return xret;
332 }
333 xrt_future_reference(&xft, NULL);
334 ics->xfts[future_id] = NULL;
335 return XRT_SUCCESS;
336}
337
338/*
339 *
340 * Handle functions.
341 *
342 */
343
344xrt_result_t
345ipc_handle_instance_get_shm_fd(volatile struct ipc_client_state *ics,
346 uint32_t max_handle_capacity,
347 xrt_shmem_handle_t *out_handles,
348 uint32_t *out_handle_count)
349{
350 IPC_TRACE_MARKER();
351
352 assert(max_handle_capacity >= 1);
353
354 out_handles[0] = get_ism_handle(ics);
355 *out_handle_count = 1;
356
357 return XRT_SUCCESS;
358}
359
360xrt_result_t
361ipc_handle_instance_describe_client(volatile struct ipc_client_state *ics,
362 const struct ipc_client_description *client_desc)
363{
364 ics->client_state.info = client_desc->info;
365 ics->client_state.pid = client_desc->pid;
366
367 struct u_pp_sink_stack_only sink;
368 u_pp_delegate_t dg = u_pp_sink_stack_only_init(&sink);
369
370#define P(...) u_pp(dg, __VA_ARGS__)
371#define PNT(...) u_pp(dg, "\n\t" __VA_ARGS__)
372#define PNTT(...) u_pp(dg, "\n\t\t" __VA_ARGS__)
373#define EXT(NAME) PNTT(#NAME ": %s", client_desc->info.NAME ? "true" : "false")
374
375 P("Client info:");
376 PNT("id: %u", ics->client_state.id);
377 PNT("application_name: '%s'", client_desc->info.application_name);
378 PNT("pid: %i", client_desc->pid);
379 PNT("extensions:");
380
381 EXT(ext_hand_tracking_enabled);
382 EXT(ext_hand_tracking_data_source_enabled);
383 EXT(ext_eye_gaze_interaction_enabled);
384 EXT(ext_future_enabled);
385 EXT(ext_hand_interaction_enabled);
386 EXT(htc_facial_tracking_enabled);
387 EXT(fb_body_tracking_enabled);
388 EXT(meta_body_tracking_full_body_enabled);
389 EXT(meta_body_tracking_calibration_enabled);
390 EXT(fb_face_tracking2_enabled);
391 EXT(android_face_tracking_enabled);
392
393#undef EXT
394#undef PTT
395#undef PT
396#undef P
397
398 // Log the pretty message.
399 IPC_INFO(ics->server, "%s", sink.buffer);
400
401 return XRT_SUCCESS;
402}
403
/*!
 * Report whether a system is available for this client, initializing it if
 * possible.
 *
 * Takes the server's global state lock around the check; the
 * IPC_CHK_WITH_GOTO macro jumps to cleanup on error so the lock is always
 * released.
 *
 * @param out_available Set by the locked helper on success.
 */
xrt_result_t
ipc_handle_instance_is_system_available(volatile struct ipc_client_state *ics, bool *out_available)
{
	IPC_TRACE_MARKER();

	xrt_result_t xret = XRT_SUCCESS;

	struct ipc_server *s = ics->server;

	os_mutex_lock(&s->global_state.lock);

	xret = ipc_server_init_system_if_available_locked(s, ics, out_available);
	IPC_CHK_WITH_GOTO(s, xret, "ipc_server_init_system_if_available_locked", cleanup);

cleanup:
	os_mutex_unlock(&s->global_state.lock);
	return xret;
}
422
423xrt_result_t
424ipc_handle_system_compositor_get_info(volatile struct ipc_client_state *ics,
425 struct xrt_system_compositor_info *out_info)
426{
427 IPC_TRACE_MARKER();
428
429 *out_info = ics->server->xsysc->info;
430
431 return XRT_SUCCESS;
432}
433
/*!
 * Create the session (and native compositor) for this client.
 *
 * Only one session per client is allowed. A native compositor is created
 * even for headless sessions; the request flag is only logged.
 *
 * @param xsi Session info from the client (overlay flag, z-order, ...).
 * @param create_native_compositor Client's request; see note above.
 * @return XRT_ERROR_IPC_SESSION_ALREADY_CREATED if a session exists,
 *         otherwise the result of xrt_system_create_session.
 */
xrt_result_t
ipc_handle_session_create(volatile struct ipc_client_state *ics,
                          const struct xrt_session_info *xsi,
                          bool create_native_compositor)
{
	IPC_TRACE_MARKER();

	struct xrt_session *xs = NULL;
	struct xrt_compositor_native *xcn = NULL;

	// Only one session per client.
	if (ics->xs != NULL) {
		return XRT_ERROR_IPC_SESSION_ALREADY_CREATED;
	}

	// The compositor is created regardless; headless is only noted.
	if (!create_native_compositor) {
		IPC_INFO(ics->server, "App asked for headless session, creating native compositor anyways");
	}

	xrt_result_t xret = xrt_system_create_session(ics->server->xsys, xsi, &xs, &xcn);
	if (xret != XRT_SUCCESS) {
		return xret;
	}

	// Remember overlay status and z-order for compositor ordering.
	ics->client_state.session_overlay = xsi->is_overlay;
	ics->client_state.z_order = xsi->z_order;

	ics->xs = xs;
	ics->xc = &xcn->base;

	// Push the initial visibility/focus state and z-order to the system compositor.
	xrt_syscomp_set_state(ics->server->xsysc, ics->xc, ics->client_state.session_visible,
	                      ics->client_state.session_focused, os_monotonic_get_ns());
	xrt_syscomp_set_z_order(ics->server->xsysc, ics->xc, ics->client_state.z_order);

	return XRT_SUCCESS;
}
469
470xrt_result_t
471ipc_handle_session_poll_events(volatile struct ipc_client_state *ics, union xrt_session_event *out_xse)
472{
473 // Have we created the session?
474 if (ics->xs == NULL) {
475 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
476 }
477
478 return xrt_session_poll_events(ics->xs, out_xse);
479}
480
/*!
 * Begin the client's session on the compositor, forwarding which extensions
 * the client enabled.
 *
 * Requires both a session and a compositor: begin-session is actually
 * handled by the compositor.
 */
xrt_result_t
ipc_handle_session_begin(volatile struct ipc_client_state *ics)
{
	IPC_TRACE_MARKER();

	// Have we created the session?
	if (ics->xs == NULL) {
		return XRT_ERROR_IPC_SESSION_NOT_CREATED;
	}

	// Need to check both because begin session is handled by compositor.
	if (ics->xc == NULL) {
		return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
	}

	//! @todo Pass the view type down.
	// Mirror the extension flags recorded at describe_client time.
	const struct xrt_begin_session_info begin_session_info = {
	    .view_type = XRT_VIEW_TYPE_STEREO,
	    .ext_hand_tracking_enabled = ics->client_state.info.ext_hand_tracking_enabled,
	    .ext_hand_tracking_data_source_enabled = ics->client_state.info.ext_hand_tracking_data_source_enabled,
	    .ext_eye_gaze_interaction_enabled = ics->client_state.info.ext_eye_gaze_interaction_enabled,
	    .ext_future_enabled = ics->client_state.info.ext_future_enabled,
	    .ext_hand_interaction_enabled = ics->client_state.info.ext_hand_interaction_enabled,
	    .htc_facial_tracking_enabled = ics->client_state.info.htc_facial_tracking_enabled,
	    .fb_body_tracking_enabled = ics->client_state.info.fb_body_tracking_enabled,
	    .fb_face_tracking2_enabled = ics->client_state.info.fb_face_tracking2_enabled,
	    .meta_body_tracking_full_body_enabled = ics->client_state.info.meta_body_tracking_full_body_enabled,
	    .meta_body_tracking_calibration_enabled = ics->client_state.info.meta_body_tracking_calibration_enabled,
	    .android_face_tracking_enabled = ics->client_state.info.android_face_tracking_enabled,
	};

	return xrt_comp_begin_session(ics->xc, &begin_session_info);
}
514
515xrt_result_t
516ipc_handle_session_end(volatile struct ipc_client_state *ics)
517{
518 IPC_TRACE_MARKER();
519
520 // Have we created the session?
521 if (ics->xs == NULL) {
522 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
523 }
524
525 // Need to check both because end session is handled by compositor.
526 if (ics->xc == NULL) {
527 return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
528 }
529
530 return xrt_comp_end_session(ics->xc);
531}
532
533xrt_result_t
534ipc_handle_session_destroy(volatile struct ipc_client_state *ics)
535{
536 IPC_TRACE_MARKER();
537
538 // Have we created the session?
539 if (ics->xs == NULL) {
540 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
541 }
542
543 ipc_server_client_destroy_session_and_compositor(ics);
544
545 return XRT_SUCCESS;
546}
547
/*!
 * Hand out client ids for the semantic spaces (root, view, stage, unbounded)
 * plus freshly created local/local-floor spaces.
 *
 * For each semantic space that the overseer does not provide, or that fails
 * to track, the corresponding out id stays UINT32_MAX — the CREATE macro
 * breaks out of its do/while instead of failing the whole call.
 */
xrt_result_t
ipc_handle_space_create_semantic_ids(volatile struct ipc_client_state *ics,
                                     uint32_t *out_root_id,
                                     uint32_t *out_view_id,
                                     uint32_t *out_local_id,
                                     uint32_t *out_local_floor_id,
                                     uint32_t *out_stage_id,
                                     uint32_t *out_unbounded_id)
{
	IPC_TRACE_MARKER();

	struct xrt_space_overseer *xso = ics->server->xso;

// Track xso->semantic.NAME for this client; leave the id as UINT32_MAX when
// the space is missing or tracking fails.
#define CREATE(NAME)                                                                                                   \
	do {                                                                                                           \
		*out_##NAME##_id = UINT32_MAX;                                                                         \
		if (xso->semantic.NAME == NULL) {                                                                      \
			break;                                                                                         \
		}                                                                                                      \
		uint32_t id = 0;                                                                                       \
		xrt_result_t xret = track_space(ics, xso->semantic.NAME, &id);                                         \
		if (xret != XRT_SUCCESS) {                                                                             \
			break;                                                                                         \
		}                                                                                                      \
		*out_##NAME##_id = id;                                                                                 \
	} while (false)

	CREATE(root);
	CREATE(view);
	CREATE(stage);
	CREATE(unbounded);

#undef CREATE

	// Local and local-floor spaces are created per-client, not shared.
	return create_localspace(ics, out_local_id, out_local_floor_id);
}
583
584xrt_result_t
585ipc_handle_space_create_offset(volatile struct ipc_client_state *ics,
586 uint32_t parent_id,
587 const struct xrt_pose *offset,
588 uint32_t *out_space_id)
589{
590 IPC_TRACE_MARKER();
591
592 struct xrt_space_overseer *xso = ics->server->xso;
593
594 struct xrt_space *parent = NULL;
595 xrt_result_t xret = validate_space_id(ics, parent_id, &parent);
596 if (xret != XRT_SUCCESS) {
597 return xret;
598 }
599
600
601 struct xrt_space *xs = NULL;
602 xret = xrt_space_overseer_create_offset_space(xso, parent, offset, &xs);
603 if (xret != XRT_SUCCESS) {
604 return xret;
605 }
606
607 uint32_t space_id = UINT32_MAX;
608 xret = track_space(ics, xs, &space_id);
609
610 // Track space grabs a reference, or it errors and we don't want to keep it around.
611 xrt_space_reference(&xs, NULL);
612
613 if (xret != XRT_SUCCESS) {
614 return xret;
615 }
616
617 *out_space_id = space_id;
618
619 return XRT_SUCCESS;
620}
621
622xrt_result_t
623ipc_handle_space_create_pose(volatile struct ipc_client_state *ics,
624 uint32_t xdev_id,
625 enum xrt_input_name name,
626 uint32_t *out_space_id)
627{
628 IPC_TRACE_MARKER();
629
630 struct xrt_space_overseer *xso = ics->server->xso;
631
632 struct xrt_device *xdev = NULL;
633 GET_XDEV_OR_RETURN(ics, xdev_id, xdev);
634
635 struct xrt_space *xs = NULL;
636 xrt_result_t xret = xrt_space_overseer_create_pose_space(xso, xdev, name, &xs);
637 if (xret != XRT_SUCCESS) {
638 return xret;
639 }
640
641 uint32_t space_id = UINT32_MAX;
642 xret = track_space(ics, xs, &space_id);
643
644 // Track space grabs a reference, or it errors and we don't want to keep it around.
645 xrt_space_reference(&xs, NULL);
646
647 if (xret != XRT_SUCCESS) {
648 return xret;
649 }
650
651 *out_space_id = space_id;
652
653 return xret;
654}
655
656xrt_result_t
657ipc_handle_space_locate_space(volatile struct ipc_client_state *ics,
658 uint32_t base_space_id,
659 const struct xrt_pose *base_offset,
660 int64_t at_timestamp,
661 uint32_t space_id,
662 const struct xrt_pose *offset,
663 struct xrt_space_relation *out_relation)
664{
665 IPC_TRACE_MARKER();
666
667 struct xrt_space_overseer *xso = ics->server->xso;
668 struct xrt_space *base_space = NULL;
669 struct xrt_space *space = NULL;
670 xrt_result_t xret;
671
672 xret = validate_space_id(ics, base_space_id, &base_space);
673 if (xret != XRT_SUCCESS) {
674 U_LOG_E("Invalid base_space_id!");
675 return xret;
676 }
677
678 xret = validate_space_id(ics, space_id, &space);
679 if (xret != XRT_SUCCESS) {
680 U_LOG_E("Invalid space_id!");
681 return xret;
682 }
683
684 return xrt_space_overseer_locate_space( //
685 xso, //
686 base_space, //
687 base_offset, //
688 at_timestamp, //
689 space, //
690 offset, //
691 out_relation); //
692}
693
694xrt_result_t
695ipc_handle_space_locate_spaces(volatile struct ipc_client_state *ics,
696 uint32_t base_space_id,
697 const struct xrt_pose *base_offset,
698 uint32_t space_count,
699 int64_t at_timestamp)
700{
701 IPC_TRACE_MARKER();
702 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
703 struct ipc_server *s = ics->server;
704
705 struct xrt_space_overseer *xso = ics->server->xso;
706 struct xrt_space *base_space = NULL;
707
708 struct xrt_space **xspaces = U_TYPED_ARRAY_CALLOC(struct xrt_space *, space_count);
709 struct xrt_pose *offsets = U_TYPED_ARRAY_CALLOC(struct xrt_pose, space_count);
710 struct xrt_space_relation *out_relations = U_TYPED_ARRAY_CALLOC(struct xrt_space_relation, space_count);
711
712 xrt_result_t xret;
713
714 os_mutex_lock(&ics->server->global_state.lock);
715
716 uint32_t *space_ids = U_TYPED_ARRAY_CALLOC(uint32_t, space_count);
717
718 // we need to send back whether allocation succeeded so the client knows whether to send more data
719 if (space_ids == NULL) {
720 xret = XRT_ERROR_ALLOCATION;
721 } else {
722 xret = XRT_SUCCESS;
723 }
724
725 xret = ipc_send(imc, &xret, sizeof(enum xrt_result));
726 if (xret != XRT_SUCCESS) {
727 IPC_ERROR(ics->server, "Failed to send spaces allocate result");
728 // Nothing else we can do
729 goto out_locate_spaces;
730 }
731
732 // only after sending the allocation result can we skip to the end in the allocation error case
733 if (space_ids == NULL) {
734 IPC_ERROR(s, "Failed to allocate space for receiving spaces ids");
735 xret = XRT_ERROR_ALLOCATION;
736 goto out_locate_spaces;
737 }
738
739 xret = ipc_receive(imc, space_ids, space_count * sizeof(uint32_t));
740 if (xret != XRT_SUCCESS) {
741 IPC_ERROR(ics->server, "Failed to receive spaces ids");
742 // assume early abort is possible, i.e. client will not send more data for this request
743 goto out_locate_spaces;
744 }
745
746 xret = ipc_receive(imc, offsets, space_count * sizeof(struct xrt_pose));
747 if (xret != XRT_SUCCESS) {
748 IPC_ERROR(ics->server, "Failed to receive spaces offsets");
749 // assume early abort is possible, i.e. client will not send more data for this request
750 goto out_locate_spaces;
751 }
752
753 xret = validate_space_id(ics, base_space_id, &base_space);
754 if (xret != XRT_SUCCESS) {
755 U_LOG_E("Invalid base_space_id %d!", base_space_id);
756 // Client is receiving out_relations now, it will get xret on this receive.
757 goto out_locate_spaces;
758 }
759
760 for (uint32_t i = 0; i < space_count; i++) {
761 if (space_ids[i] == UINT32_MAX) {
762 xspaces[i] = NULL;
763 } else {
764 xret = validate_space_id(ics, space_ids[i], &xspaces[i]);
765 if (xret != XRT_SUCCESS) {
766 U_LOG_E("Invalid space_id space_ids[%d] = %d!", i, space_ids[i]);
767 // Client is receiving out_relations now, it will get xret on this receive.
768 goto out_locate_spaces;
769 }
770 }
771 }
772 xret = xrt_space_overseer_locate_spaces( //
773 xso, //
774 base_space, //
775 base_offset, //
776 at_timestamp, //
777 xspaces, //
778 space_count, //
779 offsets, //
780 out_relations); //
781
782 xret = ipc_send(imc, out_relations, sizeof(struct xrt_space_relation) * space_count);
783 if (xret != XRT_SUCCESS) {
784 IPC_ERROR(ics->server, "Failed to send spaces relations");
785 // Nothing else we can do
786 goto out_locate_spaces;
787 }
788
789out_locate_spaces:
790 free(xspaces);
791 free(offsets);
792 free(out_relations);
793 os_mutex_unlock(&ics->server->global_state.lock);
794 return xret;
795}
796
797xrt_result_t
798ipc_handle_space_locate_device(volatile struct ipc_client_state *ics,
799 uint32_t base_space_id,
800 const struct xrt_pose *base_offset,
801 int64_t at_timestamp,
802 uint32_t xdev_id,
803 struct xrt_space_relation *out_relation)
804{
805 IPC_TRACE_MARKER();
806
807 struct xrt_space_overseer *xso = ics->server->xso;
808 struct xrt_space *base_space = NULL;
809 struct xrt_device *xdev = NULL;
810 xrt_result_t xret;
811
812 xret = validate_space_id(ics, base_space_id, &base_space);
813 if (xret != XRT_SUCCESS) {
814 U_LOG_E("Invalid base_space_id!");
815 return xret;
816 }
817
818 xret = validate_device_id(ics, xdev_id, &xdev);
819 if (xret != XRT_SUCCESS) {
820 U_LOG_E("Invalid device_id!");
821 return xret;
822 }
823
824 return xrt_space_overseer_locate_device( //
825 xso, //
826 base_space, //
827 base_offset, //
828 at_timestamp, //
829 xdev, //
830 out_relation); //
831}
832
833xrt_result_t
834ipc_handle_space_destroy(volatile struct ipc_client_state *ics, uint32_t space_id)
835{
836 struct xrt_space *xs = NULL;
837 xrt_result_t xret;
838
839 xret = validate_space_id(ics, space_id, &xs);
840 if (xret != XRT_SUCCESS) {
841 U_LOG_E("Invalid space_id!");
842 return xret;
843 }
844
845 assert(xs != NULL);
846 xs = NULL;
847
848 // Remove volatile
849 struct xrt_space **xs_ptr = (struct xrt_space **)&ics->xspcs[space_id];
850 xrt_space_reference(xs_ptr, NULL);
851
852 if (space_id == ics->local_space_index) {
853 struct xrt_space **xslocal_ptr =
854 (struct xrt_space **)&ics->server->xso->localspace[ics->local_space_overseer_index];
855 xrt_space_reference(xslocal_ptr, NULL);
856 }
857
858 if (space_id == ics->local_floor_space_index) {
859 struct xrt_space **xslocalfloor_ptr =
860 (struct xrt_space **)&ics->server->xso->localfloorspace[ics->local_floor_space_overseer_index];
861 xrt_space_reference(xslocalfloor_ptr, NULL);
862 }
863
864 return XRT_SUCCESS;
865}
866
867xrt_result_t
868ipc_handle_space_mark_ref_space_in_use(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
869{
870 struct xrt_space_overseer *xso = ics->server->xso;
871 xrt_result_t xret;
872
873 xret = validate_reference_space_type(ics, type);
874 if (xret != XRT_SUCCESS) {
875 return XRT_ERROR_IPC_FAILURE;
876 }
877
878 // Is this space already used?
879 if (ics->ref_space_used[type]) {
880 IPC_ERROR(ics->server, "Space '%u' already used!", type);
881 return XRT_ERROR_IPC_FAILURE;
882 }
883
884 xret = xrt_space_overseer_ref_space_inc(xso, type);
885 if (xret != XRT_SUCCESS) {
886 IPC_ERROR(ics->server, "xrt_space_overseer_ref_space_inc failed");
887 return xret;
888 }
889
890 // Can now mark it as used.
891 ics->ref_space_used[type] = true;
892
893 return XRT_SUCCESS;
894}
895
896xrt_result_t
897ipc_handle_space_unmark_ref_space_in_use(volatile struct ipc_client_state *ics, enum xrt_reference_space_type type)
898{
899 struct xrt_space_overseer *xso = ics->server->xso;
900 xrt_result_t xret;
901
902 xret = validate_reference_space_type(ics, type);
903 if (xret != XRT_SUCCESS) {
904 return XRT_ERROR_IPC_FAILURE;
905 }
906
907 if (!ics->ref_space_used[type]) {
908 IPC_ERROR(ics->server, "Space '%u' not used!", type);
909 return XRT_ERROR_IPC_FAILURE;
910 }
911
912 xret = xrt_space_overseer_ref_space_dec(xso, type);
913 if (xret != XRT_SUCCESS) {
914 IPC_ERROR(ics->server, "xrt_space_overseer_ref_space_dec failed");
915 return xret;
916 }
917
918 // Now we can mark it as not used.
919 ics->ref_space_used[type] = false;
920
921 return XRT_SUCCESS;
922}
923
924xrt_result_t
925ipc_handle_space_recenter_local_spaces(volatile struct ipc_client_state *ics)
926{
927 struct xrt_space_overseer *xso = ics->server->xso;
928
929 return xrt_space_overseer_recenter_local_spaces(xso);
930}
931
932xrt_result_t
933ipc_handle_space_get_tracking_origin_offset(volatile struct ipc_client_state *ics,
934 uint32_t origin_id,
935 struct xrt_pose *out_offset)
936{
937 struct xrt_space_overseer *xso = ics->server->xso;
938 struct xrt_tracking_origin *xto;
939 xrt_result_t xret = validate_origin_id(ics, origin_id, &xto);
940 if (xret != XRT_SUCCESS) {
941 return xret;
942 }
943 return xrt_space_overseer_get_tracking_origin_offset(xso, xto, out_offset);
944}
945
946xrt_result_t
947ipc_handle_space_set_tracking_origin_offset(volatile struct ipc_client_state *ics,
948 uint32_t origin_id,
949 const struct xrt_pose *offset)
950{
951 struct xrt_space_overseer *xso = ics->server->xso;
952 struct xrt_tracking_origin *xto;
953 xrt_result_t xret = validate_origin_id(ics, origin_id, &xto);
954 if (xret != XRT_SUCCESS) {
955 return xret;
956 }
957 return xrt_space_overseer_set_tracking_origin_offset(xso, xto, offset);
958}
959
960xrt_result_t
961ipc_handle_space_get_reference_space_offset(volatile struct ipc_client_state *ics,
962 enum xrt_reference_space_type type,
963 struct xrt_pose *out_offset)
964{
965 struct xrt_space_overseer *xso = ics->server->xso;
966 return xrt_space_overseer_get_reference_space_offset(xso, type, out_offset);
967}
968
969xrt_result_t
970ipc_handle_space_set_reference_space_offset(volatile struct ipc_client_state *ics,
971 enum xrt_reference_space_type type,
972 const struct xrt_pose *offset)
973{
974 struct xrt_space_overseer *xso = ics->server->xso;
975 return xrt_space_overseer_set_reference_space_offset(xso, type, offset);
976}
977
978xrt_result_t
979ipc_handle_compositor_get_info(volatile struct ipc_client_state *ics, struct xrt_compositor_info *out_info)
980{
981 IPC_TRACE_MARKER();
982
983 if (ics->xc == NULL) {
984 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
985 }
986
987 *out_info = ics->xc->info;
988
989 return XRT_SUCCESS;
990}
991
992xrt_result_t
993ipc_handle_compositor_predict_frame(volatile struct ipc_client_state *ics,
994 int64_t *out_frame_id,
995 int64_t *out_wake_up_time_ns,
996 int64_t *out_predicted_display_time_ns,
997 int64_t *out_predicted_display_period_ns)
998{
999 IPC_TRACE_MARKER();
1000
1001 if (ics->xc == NULL) {
1002 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1003 }
1004
1005 /*
1006 * We use this to signal that the session has started, this is needed
1007 * to make this client/session active/visible/focused.
1008 */
1009 ipc_server_activate_session(ics);
1010
1011 int64_t gpu_time_ns = 0;
1012 return xrt_comp_predict_frame( //
1013 ics->xc, //
1014 out_frame_id, //
1015 out_wake_up_time_ns, //
1016 &gpu_time_ns, //
1017 out_predicted_display_time_ns, //
1018 out_predicted_display_period_ns); //
1019}
1020
1021xrt_result_t
1022ipc_handle_compositor_wait_woke(volatile struct ipc_client_state *ics, int64_t frame_id)
1023{
1024 IPC_TRACE_MARKER();
1025
1026 if (ics->xc == NULL) {
1027 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1028 }
1029
1030 return xrt_comp_mark_frame(ics->xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, os_monotonic_get_ns());
1031}
1032
1033xrt_result_t
1034ipc_handle_compositor_begin_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
1035{
1036 IPC_TRACE_MARKER();
1037
1038 if (ics->xc == NULL) {
1039 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1040 }
1041
1042 return xrt_comp_begin_frame(ics->xc, frame_id);
1043}
1044
1045xrt_result_t
1046ipc_handle_compositor_discard_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
1047{
1048 IPC_TRACE_MARKER();
1049
1050 if (ics->xc == NULL) {
1051 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1052 }
1053
1054 return xrt_comp_discard_frame(ics->xc, frame_id);
1055}
1056
1057xrt_result_t
1058ipc_handle_compositor_get_display_refresh_rate(volatile struct ipc_client_state *ics,
1059 float *out_display_refresh_rate_hz)
1060{
1061 IPC_TRACE_MARKER();
1062
1063 if (ics->xc == NULL) {
1064 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1065 }
1066
1067 return xrt_comp_get_display_refresh_rate(ics->xc, out_display_refresh_rate_hz);
1068}
1069
1070xrt_result_t
1071ipc_handle_compositor_request_display_refresh_rate(volatile struct ipc_client_state *ics, float display_refresh_rate_hz)
1072{
1073 IPC_TRACE_MARKER();
1074
1075 if (ics->xc == NULL) {
1076 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1077 }
1078
1079 return xrt_comp_request_display_refresh_rate(ics->xc, display_refresh_rate_hz);
1080}
1081
1082xrt_result_t
1083ipc_handle_compositor_set_performance_level(volatile struct ipc_client_state *ics,
1084 enum xrt_perf_domain domain,
1085 enum xrt_perf_set_level level)
1086{
1087 IPC_TRACE_MARKER();
1088
1089 if (ics->xc == NULL) {
1090 return XRT_ERROR_IPC_COMPOSITOR_NOT_CREATED;
1091 }
1092
1093 if (ics->xc->set_performance_level == NULL) {
1094 return XRT_ERROR_IPC_FAILURE;
1095 }
1096
1097 return xrt_comp_set_performance_level(ics->xc, domain, level);
1098}
1099
1100static bool
1101_update_projection_layer(struct xrt_compositor *xc,
1102 volatile struct ipc_client_state *ics,
1103 volatile struct ipc_layer_entry *layer,
1104 uint32_t i)
1105{
1106 // xdev
1107 uint32_t device_id = layer->xdev_id;
1108 struct xrt_device *xdev = NULL;
1109 GET_XDEV_OR_RETURN(ics, device_id, xdev);
1110
1111 if (xdev == NULL) {
1112 U_LOG_E("Invalid xdev for projection layer!");
1113 return false;
1114 }
1115
1116 uint32_t view_count = xdev->hmd->view_count;
1117
1118 struct xrt_swapchain *xcs[XRT_MAX_VIEWS];
1119 for (uint32_t k = 0; k < view_count; k++) {
1120 const uint32_t xsci = layer->swapchain_ids[k];
1121 xcs[k] = ics->xscs[xsci];
1122 if (xcs[k] == NULL) {
1123 U_LOG_E("Invalid swap chain for projection layer!");
1124 return false;
1125 }
1126 }
1127
1128
1129 // Cast away volatile.
1130 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1131
1132 xrt_comp_layer_projection(xc, xdev, xcs, data);
1133
1134 return true;
1135}
1136
1137static bool
1138_update_projection_layer_depth(struct xrt_compositor *xc,
1139 volatile struct ipc_client_state *ics,
1140 volatile struct ipc_layer_entry *layer,
1141 uint32_t i)
1142{
1143 // xdev
1144 uint32_t xdevi = layer->xdev_id;
1145
1146 // Cast away volatile.
1147 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1148
1149 struct xrt_device *xdev = NULL;
1150 GET_XDEV_OR_RETURN(ics, xdevi, xdev);
1151 if (xdev == NULL) {
1152 U_LOG_E("Invalid xdev for projection layer #%u!", i);
1153 return false;
1154 }
1155
1156 struct xrt_swapchain *xcs[XRT_MAX_VIEWS];
1157 struct xrt_swapchain *d_xcs[XRT_MAX_VIEWS];
1158
1159 for (uint32_t j = 0; j < data->view_count; j++) {
1160 int xsci = layer->swapchain_ids[j];
1161 int d_xsci = layer->swapchain_ids[j + data->view_count];
1162
1163 xcs[j] = ics->xscs[xsci];
1164 d_xcs[j] = ics->xscs[d_xsci];
1165 if (xcs[j] == NULL || d_xcs[j] == NULL) {
1166 U_LOG_E("Invalid swap chain for projection layer #%u!", i);
1167 return false;
1168 }
1169 }
1170
1171 xrt_comp_layer_projection_depth(xc, xdev, xcs, d_xcs, data);
1172
1173 return true;
1174}
1175
1176static bool
1177do_single(struct xrt_compositor *xc,
1178 volatile struct ipc_client_state *ics,
1179 volatile struct ipc_layer_entry *layer,
1180 uint32_t i,
1181 const char *name,
1182 struct xrt_device **out_xdev,
1183 struct xrt_swapchain **out_xcs,
1184 struct xrt_layer_data **out_data)
1185{
1186 uint32_t device_id = layer->xdev_id;
1187 uint32_t sci = layer->swapchain_ids[0];
1188
1189 struct xrt_device *xdev = NULL;
1190 GET_XDEV_OR_RETURN(ics, device_id, xdev);
1191 struct xrt_swapchain *xcs = ics->xscs[sci];
1192
1193 if (xcs == NULL) {
1194 U_LOG_E("Invalid swapchain for layer #%u, '%s'!", i, name);
1195 return false;
1196 }
1197
1198 if (xdev == NULL) {
1199 U_LOG_E("Invalid xdev for layer #%u, '%s'!", i, name);
1200 return false;
1201 }
1202
1203 // Cast away volatile.
1204 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1205
1206 *out_xdev = xdev;
1207 *out_xcs = xcs;
1208 *out_data = data;
1209
1210 return true;
1211}
1212
1213static bool
1214_update_quad_layer(struct xrt_compositor *xc,
1215 volatile struct ipc_client_state *ics,
1216 volatile struct ipc_layer_entry *layer,
1217 uint32_t i)
1218{
1219 struct xrt_device *xdev;
1220 struct xrt_swapchain *xcs;
1221 struct xrt_layer_data *data;
1222
1223 if (!do_single(xc, ics, layer, i, "quad", &xdev, &xcs, &data)) {
1224 return false;
1225 }
1226
1227 xrt_comp_layer_quad(xc, xdev, xcs, data);
1228
1229 return true;
1230}
1231
1232static bool
1233_update_cube_layer(struct xrt_compositor *xc,
1234 volatile struct ipc_client_state *ics,
1235 volatile struct ipc_layer_entry *layer,
1236 uint32_t i)
1237{
1238 struct xrt_device *xdev;
1239 struct xrt_swapchain *xcs;
1240 struct xrt_layer_data *data;
1241
1242 if (!do_single(xc, ics, layer, i, "cube", &xdev, &xcs, &data)) {
1243 return false;
1244 }
1245
1246 xrt_comp_layer_cube(xc, xdev, xcs, data);
1247
1248 return true;
1249}
1250
1251static bool
1252_update_cylinder_layer(struct xrt_compositor *xc,
1253 volatile struct ipc_client_state *ics,
1254 volatile struct ipc_layer_entry *layer,
1255 uint32_t i)
1256{
1257 struct xrt_device *xdev;
1258 struct xrt_swapchain *xcs;
1259 struct xrt_layer_data *data;
1260
1261 if (!do_single(xc, ics, layer, i, "cylinder", &xdev, &xcs, &data)) {
1262 return false;
1263 }
1264
1265 xrt_comp_layer_cylinder(xc, xdev, xcs, data);
1266
1267 return true;
1268}
1269
1270static bool
1271_update_equirect1_layer(struct xrt_compositor *xc,
1272 volatile struct ipc_client_state *ics,
1273 volatile struct ipc_layer_entry *layer,
1274 uint32_t i)
1275{
1276 struct xrt_device *xdev;
1277 struct xrt_swapchain *xcs;
1278 struct xrt_layer_data *data;
1279
1280 if (!do_single(xc, ics, layer, i, "equirect1", &xdev, &xcs, &data)) {
1281 return false;
1282 }
1283
1284 xrt_comp_layer_equirect1(xc, xdev, xcs, data);
1285
1286 return true;
1287}
1288
1289static bool
1290_update_equirect2_layer(struct xrt_compositor *xc,
1291 volatile struct ipc_client_state *ics,
1292 volatile struct ipc_layer_entry *layer,
1293 uint32_t i)
1294{
1295 struct xrt_device *xdev;
1296 struct xrt_swapchain *xcs;
1297 struct xrt_layer_data *data;
1298
1299 if (!do_single(xc, ics, layer, i, "equirect2", &xdev, &xcs, &data)) {
1300 return false;
1301 }
1302
1303 xrt_comp_layer_equirect2(xc, xdev, xcs, data);
1304
1305 return true;
1306}
1307
1308static bool
1309_update_passthrough_layer(struct xrt_compositor *xc,
1310 volatile struct ipc_client_state *ics,
1311 volatile struct ipc_layer_entry *layer,
1312 uint32_t i)
1313{
1314 // xdev
1315 uint32_t xdevi = layer->xdev_id;
1316
1317 struct xrt_device *xdev = NULL;
1318 GET_XDEV_OR_RETURN(ics, xdevi, xdev);
1319
1320 if (xdev == NULL) {
1321 U_LOG_E("Invalid xdev for passthrough layer #%u!", i);
1322 return false;
1323 }
1324
1325 // Cast away volatile.
1326 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
1327
1328 xrt_comp_layer_passthrough(xc, xdev, data);
1329
1330 return true;
1331}
1332
1333static bool
1334_update_layers(volatile struct ipc_client_state *ics, struct xrt_compositor *xc, struct ipc_layer_slot *slot)
1335{
1336 IPC_TRACE_MARKER();
1337
1338 for (uint32_t i = 0; i < slot->layer_count; i++) {
1339 volatile struct ipc_layer_entry *layer = &slot->layers[i];
1340
1341 switch (layer->data.type) {
1342 case XRT_LAYER_PROJECTION:
1343 if (!_update_projection_layer(xc, ics, layer, i)) {
1344 return false;
1345 }
1346 break;
1347 case XRT_LAYER_PROJECTION_DEPTH:
1348 if (!_update_projection_layer_depth(xc, ics, layer, i)) {
1349 return false;
1350 }
1351 break;
1352 case XRT_LAYER_QUAD:
1353 if (!_update_quad_layer(xc, ics, layer, i)) {
1354 return false;
1355 }
1356 break;
1357 case XRT_LAYER_CUBE:
1358 if (!_update_cube_layer(xc, ics, layer, i)) {
1359 return false;
1360 }
1361 break;
1362 case XRT_LAYER_CYLINDER:
1363 if (!_update_cylinder_layer(xc, ics, layer, i)) {
1364 return false;
1365 }
1366 break;
1367 case XRT_LAYER_EQUIRECT1:
1368 if (!_update_equirect1_layer(xc, ics, layer, i)) {
1369 return false;
1370 }
1371 break;
1372 case XRT_LAYER_EQUIRECT2:
1373 if (!_update_equirect2_layer(xc, ics, layer, i)) {
1374 return false;
1375 }
1376 break;
1377 case XRT_LAYER_PASSTHROUGH:
1378 if (!_update_passthrough_layer(xc, ics, layer, i)) {
1379 return false;
1380 }
1381 break;
1382 default: U_LOG_E("Unhandled layer type '%i'!", layer->data.type); break;
1383 }
1384 }
1385
1386 return true;
1387}
1388
1389xrt_result_t
1390ipc_handle_compositor_layer_sync(volatile struct ipc_client_state *ics,
1391 uint32_t slot_id,
1392 uint32_t *out_free_slot_id,
1393 const xrt_graphics_sync_handle_t *handles,
1394 const uint32_t handle_count)
1395{
1396 IPC_TRACE_MARKER();
1397
1398 if (ics->xc == NULL) {
1399 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1400 }
1401
1402 struct ipc_shared_memory *ism = get_ism(ics);
1403 struct ipc_layer_slot *slot = &ism->slots[slot_id];
1404 xrt_graphics_sync_handle_t sync_handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
1405
1406 // If we have one or more save the first handle.
1407 if (handle_count >= 1) {
1408 sync_handle = handles[0];
1409 }
1410
1411 // Free all sync handles after the first one.
1412 for (uint32_t i = 1; i < handle_count; i++) {
1413 // Checks for valid handle.
1414 xrt_graphics_sync_handle_t tmp = handles[i];
1415 u_graphics_sync_unref(&tmp);
1416 }
1417
1418 // Copy current slot data.
1419 struct ipc_layer_slot copy = *slot;
1420
1421
1422 /*
1423 * Transfer data to underlying compositor.
1424 */
1425
1426 xrt_comp_layer_begin(ics->xc, ©.data);
1427
1428 _update_layers(ics, ics->xc, ©);
1429
1430 xrt_comp_layer_commit(ics->xc, sync_handle);
1431
1432
1433 /*
1434 * Manage shared state.
1435 */
1436
1437 os_mutex_lock(&ics->server->global_state.lock);
1438
1439 *out_free_slot_id = (ics->server->current_slot_index + 1) % IPC_MAX_SLOTS;
1440 ics->server->current_slot_index = *out_free_slot_id;
1441
1442 os_mutex_unlock(&ics->server->global_state.lock);
1443
1444 return XRT_SUCCESS;
1445}
1446
1447xrt_result_t
1448ipc_handle_compositor_layer_sync_with_semaphore(volatile struct ipc_client_state *ics,
1449 uint32_t slot_id,
1450 uint32_t semaphore_id,
1451 uint64_t semaphore_value,
1452 uint32_t *out_free_slot_id)
1453{
1454 IPC_TRACE_MARKER();
1455
1456 if (ics->xc == NULL) {
1457 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1458 }
1459 if (semaphore_id >= IPC_MAX_CLIENT_SEMAPHORES) {
1460 IPC_ERROR(ics->server, "Invalid semaphore_id");
1461 return XRT_ERROR_IPC_FAILURE;
1462 }
1463 if (ics->xcsems[semaphore_id] == NULL) {
1464 IPC_ERROR(ics->server, "Semaphore of id %u not created!", semaphore_id);
1465 return XRT_ERROR_IPC_FAILURE;
1466 }
1467
1468 struct xrt_compositor_semaphore *xcsem = ics->xcsems[semaphore_id];
1469
1470 struct ipc_shared_memory *ism = get_ism(ics);
1471 struct ipc_layer_slot *slot = &ism->slots[slot_id];
1472
1473 // Copy current slot data.
1474 struct ipc_layer_slot copy = *slot;
1475
1476
1477
1478 /*
1479 * Transfer data to underlying compositor.
1480 */
1481
1482 xrt_comp_layer_begin(ics->xc, ©.data);
1483
1484 _update_layers(ics, ics->xc, ©);
1485
1486 xrt_comp_layer_commit_with_semaphore(ics->xc, xcsem, semaphore_value);
1487
1488
1489 /*
1490 * Manage shared state.
1491 */
1492
1493 os_mutex_lock(&ics->server->global_state.lock);
1494
1495 *out_free_slot_id = (ics->server->current_slot_index + 1) % IPC_MAX_SLOTS;
1496 ics->server->current_slot_index = *out_free_slot_id;
1497
1498 os_mutex_unlock(&ics->server->global_state.lock);
1499
1500 return XRT_SUCCESS;
1501}
1502
1503xrt_result_t
1504ipc_handle_compositor_create_passthrough(volatile struct ipc_client_state *ics,
1505 const struct xrt_passthrough_create_info *info)
1506{
1507 IPC_TRACE_MARKER();
1508
1509 if (ics->xc == NULL) {
1510 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1511 }
1512
1513 return xrt_comp_create_passthrough(ics->xc, info);
1514}
1515
1516xrt_result_t
1517ipc_handle_compositor_create_passthrough_layer(volatile struct ipc_client_state *ics,
1518 const struct xrt_passthrough_layer_create_info *info)
1519{
1520 IPC_TRACE_MARKER();
1521
1522 if (ics->xc == NULL) {
1523 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1524 }
1525
1526 return xrt_comp_create_passthrough_layer(ics->xc, info);
1527}
1528
1529xrt_result_t
1530ipc_handle_compositor_destroy_passthrough(volatile struct ipc_client_state *ics)
1531{
1532 IPC_TRACE_MARKER();
1533
1534 if (ics->xc == NULL) {
1535 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1536 }
1537
1538 xrt_comp_destroy_passthrough(ics->xc);
1539
1540 return XRT_SUCCESS;
1541}
1542
1543xrt_result_t
1544ipc_handle_compositor_set_thread_hint(volatile struct ipc_client_state *ics,
1545 enum xrt_thread_hint hint,
1546 uint32_t thread_id)
1547
1548{
1549 IPC_TRACE_MARKER();
1550
1551 if (ics->xc == NULL) {
1552 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1553 }
1554
1555 return xrt_comp_set_thread_hint(ics->xc, hint, thread_id);
1556}
1557
1558xrt_result_t
1559ipc_handle_compositor_get_reference_bounds_rect(volatile struct ipc_client_state *ics,
1560 enum xrt_reference_space_type reference_space_type,
1561 struct xrt_vec2 *bounds)
1562{
1563 IPC_TRACE_MARKER();
1564
1565 if (ics->xc == NULL) {
1566 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1567 }
1568
1569 return xrt_comp_get_reference_bounds_rect(ics->xc, reference_space_type, bounds);
1570}
1571
1572xrt_result_t
1573ipc_handle_system_get_clients(volatile struct ipc_client_state *_ics, struct ipc_client_list *list)
1574{
1575 struct ipc_server *s = _ics->server;
1576
1577 // Look client list.
1578 os_mutex_lock(&s->global_state.lock);
1579
1580 uint32_t count = 0;
1581 for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
1582
1583 volatile struct ipc_client_state *ics = &s->threads[i].ics;
1584
1585 // Is this thread running?
1586 if (ics->server_thread_index < 0) {
1587 continue;
1588 }
1589
1590 list->ids[count++] = ics->client_state.id;
1591 }
1592
1593 list->id_count = count;
1594
1595 // Unlock now.
1596 os_mutex_unlock(&s->global_state.lock);
1597
1598 return XRT_SUCCESS;
1599}
1600
1601xrt_result_t
1602ipc_handle_system_get_properties(volatile struct ipc_client_state *_ics, struct xrt_system_properties *out_properties)
1603{
1604 struct ipc_server *s = _ics->server;
1605
1606 return ipc_server_get_system_properties(s, out_properties);
1607}
1608
1609xrt_result_t
1610ipc_handle_system_get_client_info(volatile struct ipc_client_state *_ics,
1611 uint32_t client_id,
1612 struct ipc_app_state *out_ias)
1613{
1614 struct ipc_server *s = _ics->server;
1615
1616 return ipc_server_get_client_app_state(s, client_id, out_ias);
1617}
1618
1619xrt_result_t
1620ipc_handle_system_set_primary_client(volatile struct ipc_client_state *_ics, uint32_t client_id)
1621{
1622 struct ipc_server *s = _ics->server;
1623
1624 IPC_INFO(s, "System setting active client to %d.", client_id);
1625
1626 return ipc_server_set_active_client(s, client_id);
1627}
1628
1629xrt_result_t
1630ipc_handle_system_set_focused_client(volatile struct ipc_client_state *ics, uint32_t client_id)
1631{
1632 IPC_INFO(ics->server, "UNIMPLEMENTED: system setting focused client to %d.", client_id);
1633
1634 return XRT_SUCCESS;
1635}
1636
1637xrt_result_t
1638ipc_handle_system_toggle_io_client(volatile struct ipc_client_state *_ics, uint32_t client_id)
1639{
1640 struct ipc_server *s = _ics->server;
1641
1642 IPC_INFO(s, "System toggling io for client %u.", client_id);
1643
1644 return ipc_server_toggle_io_client(s, client_id);
1645}
1646
1647xrt_result_t
1648ipc_handle_swapchain_get_properties(volatile struct ipc_client_state *ics,
1649 const struct xrt_swapchain_create_info *info,
1650 struct xrt_swapchain_create_properties *xsccp)
1651{
1652 IPC_TRACE_MARKER();
1653
1654 if (ics->xc == NULL) {
1655 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1656 }
1657
1658 return xrt_comp_get_swapchain_create_properties(ics->xc, info, xsccp);
1659}
1660
1661xrt_result_t
1662ipc_handle_swapchain_create(volatile struct ipc_client_state *ics,
1663 const struct xrt_swapchain_create_info *info,
1664 uint32_t *out_id,
1665 uint32_t *out_image_count,
1666 uint64_t *out_size,
1667 bool *out_use_dedicated_allocation,
1668 uint32_t max_handle_capacity,
1669 xrt_graphics_buffer_handle_t *out_handles,
1670 uint32_t *out_handle_count)
1671{
1672 IPC_TRACE_MARKER();
1673
1674 xrt_result_t xret = XRT_SUCCESS;
1675 uint32_t index = 0;
1676
1677 xret = validate_swapchain_state(ics, &index);
1678 if (xret != XRT_SUCCESS) {
1679 return xret;
1680 }
1681
1682 // Create the swapchain
1683 struct xrt_swapchain *xsc = NULL; // Has to be NULL.
1684 xret = xrt_comp_create_swapchain(ics->xc, info, &xsc);
1685 if (xret != XRT_SUCCESS) {
1686 if (xret == XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED) {
1687 IPC_WARN(ics->server,
1688 "xrt_comp_create_swapchain: Attempted to create valid, but unsupported swapchain");
1689 } else {
1690 IPC_ERROR(ics->server, "Error xrt_comp_create_swapchain failed!");
1691 }
1692 return xret;
1693 }
1694
1695 // It's now safe to increment the number of swapchains.
1696 ics->swapchain_count++;
1697
1698 IPC_TRACE(ics->server, "Created swapchain %d.", index);
1699
1700 set_swapchain_info(ics, index, info, xsc);
1701
1702 // return our result to the caller.
1703 struct xrt_swapchain_native *xscn = (struct xrt_swapchain_native *)xsc;
1704
1705 // Limit checking
1706 assert(xsc->image_count <= XRT_MAX_SWAPCHAIN_IMAGES);
1707 assert(xsc->image_count <= max_handle_capacity);
1708
1709 for (size_t i = 1; i < xsc->image_count; i++) {
1710 assert(xscn->images[0].size == xscn->images[i].size);
1711 assert(xscn->images[0].use_dedicated_allocation == xscn->images[i].use_dedicated_allocation);
1712 }
1713
1714 // Assuming all images allocated in the same swapchain have the same allocation requirements.
1715 *out_size = xscn->images[0].size;
1716 *out_use_dedicated_allocation = xscn->images[0].use_dedicated_allocation;
1717 *out_id = index;
1718 *out_image_count = xsc->image_count;
1719
1720 // Setup the fds.
1721 *out_handle_count = xsc->image_count;
1722 for (size_t i = 0; i < xsc->image_count; i++) {
1723 out_handles[i] = xscn->images[i].handle;
1724 }
1725
1726 return XRT_SUCCESS;
1727}
1728
1729xrt_result_t
1730ipc_handle_swapchain_import(volatile struct ipc_client_state *ics,
1731 const struct xrt_swapchain_create_info *info,
1732 const struct ipc_arg_swapchain_from_native *args,
1733 uint32_t *out_id,
1734 const xrt_graphics_buffer_handle_t *handles,
1735 uint32_t handle_count)
1736{
1737 IPC_TRACE_MARKER();
1738
1739 xrt_result_t xret = XRT_SUCCESS;
1740 uint32_t index = 0;
1741
1742 xret = validate_swapchain_state(ics, &index);
1743 if (xret != XRT_SUCCESS) {
1744 return xret;
1745 }
1746
1747 struct xrt_image_native xins[XRT_MAX_SWAPCHAIN_IMAGES] = XRT_STRUCT_INIT;
1748 for (uint32_t i = 0; i < handle_count; i++) {
1749 xins[i].handle = handles[i];
1750 xins[i].size = args->sizes[i];
1751#if defined(XRT_GRAPHICS_BUFFER_HANDLE_IS_WIN32_HANDLE)
1752 // DXGI handles need to be dealt with differently, they are identified
1753 // by having their lower bit set to 1 during transfer
1754 if ((size_t)xins[i].handle & 1) {
1755 xins[i].handle = (HANDLE)((size_t)xins[i].handle - 1);
1756 xins[i].is_dxgi_handle = true;
1757 }
1758#endif
1759 }
1760
1761 // create the swapchain
1762 struct xrt_swapchain *xsc = NULL;
1763 xret = xrt_comp_import_swapchain(ics->xc, info, xins, handle_count, &xsc);
1764 if (xret != XRT_SUCCESS) {
1765 return xret;
1766 }
1767
1768 // It's now safe to increment the number of swapchains.
1769 ics->swapchain_count++;
1770
1771 IPC_TRACE(ics->server, "Created swapchain %d.", index);
1772
1773 set_swapchain_info(ics, index, info, xsc);
1774 *out_id = index;
1775
1776 return XRT_SUCCESS;
1777}
1778
1779xrt_result_t
1780ipc_handle_swapchain_wait_image(volatile struct ipc_client_state *ics, uint32_t id, int64_t timeout_ns, uint32_t index)
1781{
1782 if (ics->xc == NULL) {
1783 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1784 }
1785
1786 //! @todo Look up the index.
1787 uint32_t sc_index = id;
1788 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1789
1790 return xrt_swapchain_wait_image(xsc, timeout_ns, index);
1791}
1792
1793xrt_result_t
1794ipc_handle_swapchain_acquire_image(volatile struct ipc_client_state *ics, uint32_t id, uint32_t *out_index)
1795{
1796 if (ics->xc == NULL) {
1797 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1798 }
1799
1800 //! @todo Look up the index.
1801 uint32_t sc_index = id;
1802 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1803
1804 xrt_swapchain_acquire_image(xsc, out_index);
1805
1806 return XRT_SUCCESS;
1807}
1808
1809xrt_result_t
1810ipc_handle_swapchain_release_image(volatile struct ipc_client_state *ics, uint32_t id, uint32_t index)
1811{
1812 if (ics->xc == NULL) {
1813 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1814 }
1815
1816 //! @todo Look up the index.
1817 uint32_t sc_index = id;
1818 struct xrt_swapchain *xsc = ics->xscs[sc_index];
1819
1820 xrt_swapchain_release_image(xsc, index);
1821
1822 return XRT_SUCCESS;
1823}
1824
// Destroy swapchain @p id: drop the server-side reference and mark the
// slot inactive so it can be reused by a later create/import.
xrt_result_t
ipc_handle_swapchain_destroy(volatile struct ipc_client_state *ics, uint32_t id)
{
	if (ics->xc == NULL) {
		return XRT_ERROR_IPC_SESSION_NOT_CREATED;
	}

	// NOTE(review): decremented unconditionally — a double-destroy or an
	// id that was never created would skew this count; confirm the
	// generated dispatch layer guarantees a valid, active id here.
	ics->swapchain_count--;

	// Drop our reference, does NULL checking. Cast away volatile.
	xrt_swapchain_reference((struct xrt_swapchain **)&ics->xscs[id], NULL);
	ics->swapchain_data[id].active = false;

	return XRT_SUCCESS;
}
1840
1841
1842/*
1843 *
1844 * Compositor semaphore function..
1845 *
1846 */
1847
1848xrt_result_t
1849ipc_handle_compositor_semaphore_create(volatile struct ipc_client_state *ics,
1850 uint32_t *out_id,
1851 uint32_t max_handle_count,
1852 xrt_graphics_sync_handle_t *out_handles,
1853 uint32_t *out_handle_count)
1854{
1855 xrt_result_t xret;
1856
1857 if (ics->xc == NULL) {
1858 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1859 }
1860
1861 int id = 0;
1862 for (; id < IPC_MAX_CLIENT_SEMAPHORES; id++) {
1863 if (ics->xcsems[id] == NULL) {
1864 break;
1865 }
1866 }
1867
1868 if (id == IPC_MAX_CLIENT_SEMAPHORES) {
1869 IPC_ERROR(ics->server, "Too many compositor semaphores alive!");
1870 return XRT_ERROR_IPC_FAILURE;
1871 }
1872
1873 struct xrt_compositor_semaphore *xcsem = NULL;
1874 xrt_graphics_sync_handle_t handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
1875
1876 xret = xrt_comp_create_semaphore(ics->xc, &handle, &xcsem);
1877 if (xret != XRT_SUCCESS) {
1878 IPC_ERROR(ics->server, "Failed to create compositor semaphore!");
1879 return xret;
1880 }
1881
1882 // Set it directly, no need to use reference here.
1883 ics->xcsems[id] = xcsem;
1884
1885 // Set out parameters.
1886 *out_id = id;
1887 out_handles[0] = handle;
1888 *out_handle_count = 1;
1889
1890 return XRT_SUCCESS;
1891}
1892
1893xrt_result_t
1894ipc_handle_compositor_semaphore_destroy(volatile struct ipc_client_state *ics, uint32_t id)
1895{
1896 if (ics->xc == NULL) {
1897 return XRT_ERROR_IPC_SESSION_NOT_CREATED;
1898 }
1899
1900 if (ics->xcsems[id] == NULL) {
1901 IPC_ERROR(ics->server, "Client tried to delete non-existent compositor semaphore!");
1902 return XRT_ERROR_IPC_FAILURE;
1903 }
1904
1905 ics->compositor_semaphore_count--;
1906
1907 // Drop our reference, does NULL checking. Cast away volatile.
1908 xrt_compositor_semaphore_reference((struct xrt_compositor_semaphore **)&ics->xcsems[id], NULL);
1909
1910 return XRT_SUCCESS;
1911}
1912
1913
1914/*
1915 *
1916 * Device functions.
1917 *
1918 */
1919
1920xrt_result_t
1921ipc_handle_device_update_input(volatile struct ipc_client_state *ics, uint32_t id)
1922{
1923 // To make the code a bit more readable.
1924 uint32_t device_id = id;
1925 struct ipc_shared_memory *ism = get_ism(ics);
1926 struct ipc_device *idev = get_idev(ics, device_id);
1927 struct xrt_device *xdev = idev->xdev;
1928 struct ipc_shared_device *isdev = &ism->isdevs[device_id];
1929
1930 // Update inputs.
1931 xrt_result_t xret = xrt_device_update_inputs(xdev);
1932 if (xret != XRT_SUCCESS) {
1933 IPC_ERROR(ics->server, "Failed to update input");
1934 return xret;
1935 }
1936
1937 // Copy data into the shared memory.
1938 struct xrt_input *src = xdev->inputs;
1939 struct xrt_input *dst = &ism->inputs[isdev->first_input_index];
1940 size_t size = sizeof(struct xrt_input) * isdev->input_count;
1941
1942 bool io_active = ics->io_active;
1943 if (io_active) {
1944 memcpy(dst, src, size);
1945 } else {
1946 memset(dst, 0, size);
1947
1948 for (uint32_t i = 0; i < isdev->input_count; i++) {
1949 dst[i].name = src[i].name;
1950
1951 // Special case the rotation of the head.
1952 if (dst[i].name == XRT_INPUT_GENERIC_HEAD_POSE) {
1953 dst[i].active = src[i].active;
1954 }
1955 }
1956 }
1957
1958 // Reply.
1959 return XRT_SUCCESS;
1960}
1961
1962static struct xrt_input *
1963find_input(volatile struct ipc_client_state *ics, uint32_t device_id, enum xrt_input_name name)
1964{
1965 struct ipc_shared_memory *ism = get_ism(ics);
1966 struct ipc_shared_device *isdev = &ism->isdevs[device_id];
1967 struct xrt_input *io = &ism->inputs[isdev->first_input_index];
1968
1969 for (uint32_t i = 0; i < isdev->input_count; i++) {
1970 if (io[i].name == name) {
1971 return &io[i];
1972 }
1973 }
1974
1975 return NULL;
1976}
1977
/*
 * Get the tracked pose for input @p name of device @p id, subject to the
 * client's IO-active state. The head pose is exempt from IO gating so a
 * deactivated client still renders from the right viewpoint.
 */
xrt_result_t
ipc_handle_device_get_tracked_pose(volatile struct ipc_client_state *ics,
                                   uint32_t id,
                                   enum xrt_input_name name,
                                   int64_t at_timestamp,
                                   struct xrt_space_relation *out_relation)
{
	// To make the code a bit more readable.
	// NOTE(review): device_id is not bounds-checked here, unlike the
	// GET_XDEV_OR_RETURN callers — confirm the generated dispatch layer
	// validates it before this point.
	uint32_t device_id = id;
	struct ipc_device *isdev = &ics->server->idevs[device_id];
	struct xrt_device *xdev = isdev->xdev;

	// Find the input
	struct xrt_input *input = find_input(ics, device_id, name);
	if (input == NULL) {
		return XRT_ERROR_IPC_FAILURE;
	}

	// Special case the headpose.
	bool disabled = !ics->io_active && name != XRT_INPUT_GENERIC_HEAD_POSE;
	// `input->active` is the shared-memory flag last published by
	// ipc_handle_device_update_input, i.e. what the client last saw.
	bool active_on_client = input->active;

	// We have been disabled but the client hasn't called update.
	// Return a zeroed relation as success so the client doesn't error
	// out before it observes the deactivation.
	if (disabled && active_on_client) {
		U_ZERO(out_relation);
		return XRT_SUCCESS;
	}

	// Either IO is off (and the client knows), or the input is inactive.
	if (disabled || !active_on_client) {
		return XRT_ERROR_POSE_NOT_ACTIVE;
	}

	// Get the pose.
	return xrt_device_get_tracked_pose(xdev, name, at_timestamp, out_relation);
}
2013
2014xrt_result_t
2015ipc_handle_device_get_hand_tracking(volatile struct ipc_client_state *ics,
2016 uint32_t id,
2017 enum xrt_input_name name,
2018 int64_t at_timestamp,
2019 struct xrt_hand_joint_set *out_value,
2020 int64_t *out_timestamp)
2021{
2022
2023 // To make the code a bit more readable.
2024 uint32_t device_id = id;
2025 struct xrt_device *xdev = NULL;
2026 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2027
2028 // Get the pose.
2029 return xrt_device_get_hand_tracking(xdev, name, at_timestamp, out_value, out_timestamp);
2030}
2031
/*
 * Get view poses from a device and stream them back over the message
 * channel. The wire order — fixed-size reply, then fovs, then poses —
 * is part of the protocol and must match what the client reads.
 */
xrt_result_t
ipc_handle_device_get_view_poses(volatile struct ipc_client_state *ics,
                                 uint32_t id,
                                 const struct xrt_vec3 *fallback_eye_relation,
                                 int64_t at_timestamp_ns,
                                 enum xrt_view_type view_type,
                                 uint32_t view_count)
{
	struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
	struct ipc_device_get_view_poses_reply reply = XRT_STRUCT_INIT;
	struct ipc_server *s = ics->server;
	xrt_result_t xret;

	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);


	// view_count is client-controlled; bound it before stack use below.
	if (view_count == 0 || view_count > IPC_MAX_RAW_VIEWS) {
		IPC_ERROR(s, "Client asked for zero or too many views! (%u)", view_count);

		reply.result = XRT_ERROR_IPC_FAILURE;
		// Send the full reply, the client expects it.
		return ipc_send(imc, &reply, sizeof(reply));
	}

	// Data to get.
	struct xrt_fov fovs[IPC_MAX_RAW_VIEWS];
	struct xrt_pose poses[IPC_MAX_RAW_VIEWS];

	// Query the device; any failure is carried in reply.result.
	reply.result = xrt_device_get_view_poses( //
	    xdev,                                 //
	    fallback_eye_relation,                //
	    at_timestamp_ns,                      //
	    view_type,                            //
	    view_count,                           //
	    &reply.head_relation,                 //
	    fovs,                                 //
	    poses);                               //

	/*
	 * This isn't really needed, but demonstrates the server sending the
	 * length back in the reply, a common pattern for other functions.
	 */
	reply.view_count = view_count;

	/*
	 * Send the reply first isn't required for functions in general, but it
	 * will need to match what the client expects. This demonstrates the
	 * server sending the length back in the reply, a common pattern for
	 * other functions.
	 */
	xret = ipc_send(imc, &reply, sizeof(reply));
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send reply!");
		return xret;
	}

	// Send the fovs that we got.
	xret = ipc_send(imc, fovs, sizeof(struct xrt_fov) * view_count);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send fovs!");
		return xret;
	}

	// And finally the poses.
	xret = ipc_send(imc, poses, sizeof(struct xrt_pose) * view_count);
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(s, "Failed to send poses!");
		return xret;
	}

	return XRT_SUCCESS;
}
2107
2108xrt_result_t
2109ipc_handle_device_get_view_poses_2(volatile struct ipc_client_state *ics,
2110 uint32_t id,
2111 const struct xrt_vec3 *default_eye_relation,
2112 int64_t at_timestamp_ns,
2113 enum xrt_view_type view_type,
2114 uint32_t view_count,
2115 struct ipc_info_get_view_poses_2 *out_info)
2116{
2117 // To make the code a bit more readable.
2118 uint32_t device_id = id;
2119 struct xrt_device *xdev = NULL;
2120 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2121
2122 return xrt_device_get_view_poses( //
2123 xdev, //
2124 default_eye_relation, //
2125 at_timestamp_ns, //
2126 view_type, //
2127 view_count, //
2128 &out_info->head_relation, //
2129 out_info->fovs, //
2130 out_info->poses); //
2131}
2132
2133xrt_result_t
2134ipc_handle_device_compute_distortion(volatile struct ipc_client_state *ics,
2135 uint32_t id,
2136 uint32_t view,
2137 float u,
2138 float v,
2139 struct xrt_uv_triplet *out_triplet)
2140{
2141 // To make the code a bit more readable.
2142 uint32_t device_id = id;
2143 struct xrt_device *xdev = NULL;
2144 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2145
2146 return xrt_device_compute_distortion(xdev, view, u, v, out_triplet);
2147}
2148
2149xrt_result_t
2150ipc_handle_device_begin_plane_detection_ext(volatile struct ipc_client_state *ics,
2151 uint32_t id,
2152 uint64_t plane_detection_id,
2153 uint64_t *out_plane_detection_id)
2154{
2155 // To make the code a bit more readable.
2156 uint32_t device_id = id;
2157 struct xrt_device *xdev = NULL;
2158 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2159
2160 uint64_t new_count = ics->plane_detection_count + 1;
2161
2162 if (new_count > ics->plane_detection_size) {
2163 IPC_TRACE(ics->server, "Plane detections tracking size: %u -> %u", (uint32_t)ics->plane_detection_count,
2164 (uint32_t)new_count);
2165
2166 U_ARRAY_REALLOC_OR_FREE(ics->plane_detection_ids, uint64_t, new_count);
2167 U_ARRAY_REALLOC_OR_FREE(ics->plane_detection_xdev, struct xrt_device *, new_count);
2168 ics->plane_detection_size = new_count;
2169 }
2170
2171 struct xrt_plane_detector_begin_info_ext *begin_info = &get_ism(ics)->plane_begin_info_ext;
2172
2173 enum xrt_result xret =
2174 xrt_device_begin_plane_detection_ext(xdev, begin_info, plane_detection_id, out_plane_detection_id);
2175 if (xret != XRT_SUCCESS) {
2176 IPC_TRACE(ics->server, "xrt_device_begin_plane_detection_ext error: %d", xret);
2177 return xret;
2178 }
2179
2180 if (*out_plane_detection_id != 0) {
2181 uint64_t index = ics->plane_detection_count;
2182 ics->plane_detection_ids[index] = *out_plane_detection_id;
2183 ics->plane_detection_xdev[index] = xdev;
2184 ics->plane_detection_count = new_count;
2185 }
2186
2187 return XRT_SUCCESS;
2188}
2189
/*
 * Destroy a plane detection on the device and remove its id from the
 * per-client tracking arrays by compacting everything after it one slot
 * to the left.
 */
xrt_result_t
ipc_handle_device_destroy_plane_detection_ext(volatile struct ipc_client_state *ics,
                                              uint32_t id,
                                              uint64_t plane_detection_id)
{
	// To make the code a bit more readable.
	uint32_t device_id = id;
	struct xrt_device *xdev = NULL;
	GET_XDEV_OR_RETURN(ics, device_id, xdev);

	// Destroy on the device first; bookkeeping is updated regardless so
	// the tracking arrays never keep a destroyed id.
	enum xrt_result xret = xrt_device_destroy_plane_detection_ext(xdev, plane_detection_id);

	// Iterate through plane detection ids. Once found, move every item one slot to the left.
	bool compact_right = false;
	for (uint32_t i = 0; i < ics->plane_detection_count; i++) {
		if (ics->plane_detection_ids[i] == plane_detection_id) {
			compact_right = true;
		}
		// After the match, shift the remaining entries left in lockstep.
		if (compact_right && (i + 1) < ics->plane_detection_count) {
			ics->plane_detection_ids[i] = ics->plane_detection_ids[i + 1];
			ics->plane_detection_xdev[i] = ics->plane_detection_xdev[i + 1];
		}
	}
	// if the plane detection was correctly tracked compact_right should always be true
	if (compact_right) {
		ics->plane_detection_count -= 1;
	} else {
		IPC_ERROR(ics->server, "Destroyed plane detection that was not tracked");
	}

	// Report the device-side failure only after the bookkeeping is fixed.
	if (xret != XRT_SUCCESS) {
		IPC_ERROR(ics->server, "xrt_device_destroy_plane_detection_ext error: %d", xret);
		return xret;
	}

	return XRT_SUCCESS;
}
2227
2228xrt_result_t
2229ipc_handle_device_get_plane_detection_state_ext(volatile struct ipc_client_state *ics,
2230 uint32_t id,
2231 uint64_t plane_detection_id,
2232 enum xrt_plane_detector_state_ext *out_state)
2233{
2234 // To make the code a bit more readable.
2235 uint32_t device_id = id;
2236 struct xrt_device *xdev = NULL;
2237 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2238
2239 xrt_result_t xret = xrt_device_get_plane_detection_state_ext(xdev, plane_detection_id, out_state);
2240 if (xret != XRT_SUCCESS) {
2241 IPC_ERROR(ics->server, "xrt_device_get_plane_detection_state_ext error: %d", xret);
2242 return xret;
2243 }
2244
2245 return XRT_SUCCESS;
2246}
2247
2248xrt_result_t
2249ipc_handle_device_get_plane_detections_ext(volatile struct ipc_client_state *ics,
2250 uint32_t id,
2251 uint64_t plane_detection_id)
2252
2253{
2254 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
2255 struct ipc_device_get_plane_detections_ext_reply reply = XRT_STRUCT_INIT;
2256 struct ipc_server *s = ics->server;
2257
2258 // To make the code a bit more readable.
2259 uint32_t device_id = id;
2260 struct xrt_device *xdev = NULL;
2261 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2262
2263 struct xrt_plane_detections_ext out = {0};
2264
2265 xrt_result_t xret = xrt_device_get_plane_detections_ext(xdev, plane_detection_id, &out);
2266 if (xret != XRT_SUCCESS) {
2267 IPC_ERROR(ics->server, "xrt_device_get_plane_detections_ext error: %d", xret);
2268 // probably nothing allocated on error, but make sure
2269 xrt_plane_detections_ext_clear(&out);
2270 return xret;
2271 }
2272
2273 reply.result = XRT_SUCCESS;
2274 reply.location_size = out.location_count; // because we initialized to 0, now size == count
2275 reply.polygon_size = out.polygon_info_size;
2276 reply.vertex_size = out.vertex_size;
2277
2278 xret = ipc_send(imc, &reply, sizeof(reply));
2279 if (xret != XRT_SUCCESS) {
2280 IPC_ERROR(s, "Failed to send reply!");
2281 goto out;
2282 }
2283
2284 // send expected contents
2285
2286 if (out.location_count > 0) {
2287 xret =
2288 ipc_send(imc, out.locations, sizeof(struct xrt_plane_detector_location_ext) * out.location_count);
2289 if (xret != XRT_SUCCESS) {
2290 IPC_ERROR(s, "Failed to send locations!");
2291 goto out;
2292 }
2293
2294 xret = ipc_send(imc, out.polygon_info_start_index, sizeof(uint32_t) * out.location_count);
2295 if (xret != XRT_SUCCESS) {
2296 IPC_ERROR(s, "Failed to send locations!");
2297 goto out;
2298 }
2299 }
2300
2301 if (out.polygon_info_size > 0) {
2302 xret =
2303 ipc_send(imc, out.polygon_infos, sizeof(struct xrt_plane_polygon_info_ext) * out.polygon_info_size);
2304 if (xret != XRT_SUCCESS) {
2305 IPC_ERROR(s, "Failed to send polygon_infos!");
2306 goto out;
2307 }
2308 }
2309
2310 if (out.vertex_size > 0) {
2311 xret = ipc_send(imc, out.vertices, sizeof(struct xrt_vec2) * out.vertex_size);
2312 if (xret != XRT_SUCCESS) {
2313 IPC_ERROR(s, "Failed to send vertices!");
2314 goto out;
2315 }
2316 }
2317
2318out:
2319 xrt_plane_detections_ext_clear(&out);
2320 return xret;
2321}
2322
2323xrt_result_t
2324ipc_handle_device_get_presence(volatile struct ipc_client_state *ics, uint32_t id, bool *presence)
2325{
2326 struct xrt_device *xdev = NULL;
2327 GET_XDEV_OR_RETURN(ics, id, xdev);
2328 return xrt_device_get_presence(xdev, presence);
2329}
2330
2331xrt_result_t
2332ipc_handle_device_set_output(volatile struct ipc_client_state *ics,
2333 uint32_t id,
2334 enum xrt_output_name name,
2335 const struct xrt_output_value *value)
2336{
2337 // To make the code a bit more readable.
2338 uint32_t device_id = id;
2339 struct xrt_device *xdev = NULL;
2340 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2341
2342 // Set the output.
2343 return xrt_device_set_output(xdev, name, value);
2344}
2345
2346xrt_result_t
2347ipc_handle_device_set_haptic_output(volatile struct ipc_client_state *ics,
2348 uint32_t id,
2349 enum xrt_output_name name,
2350 const struct ipc_pcm_haptic_buffer *buffer)
2351{
2352 IPC_TRACE_MARKER();
2353 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
2354 struct ipc_server *s = ics->server;
2355
2356 xrt_result_t xret;
2357
2358 // To make the code a bit more readable.
2359 uint32_t device_id = id;
2360 struct xrt_device *xdev = NULL;
2361 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2362
2363 os_mutex_lock(&ics->server->global_state.lock);
2364
2365 float *samples = U_TYPED_ARRAY_CALLOC(float, buffer->num_samples);
2366
2367 // send the allocation result
2368 xret = samples ? XRT_SUCCESS : XRT_ERROR_ALLOCATION;
2369 xret = ipc_send(imc, &xret, sizeof xret);
2370 if (xret != XRT_SUCCESS) {
2371 IPC_ERROR(ics->server, "Failed to send samples allocate result");
2372 goto set_haptic_output_end;
2373 }
2374
2375 if (!samples) {
2376 IPC_ERROR(s, "Failed to allocate samples for haptic output");
2377 xret = XRT_ERROR_ALLOCATION;
2378 goto set_haptic_output_end;
2379 }
2380
2381 xret = ipc_receive(imc, samples, sizeof(float) * buffer->num_samples);
2382 if (xret != XRT_SUCCESS) {
2383 IPC_ERROR(s, "Failed to receive samples");
2384 goto set_haptic_output_end;
2385 }
2386
2387 uint32_t samples_consumed;
2388 struct xrt_output_value value = {
2389 .type = XRT_OUTPUT_VALUE_TYPE_PCM_VIBRATION,
2390 .pcm_vibration =
2391 {
2392 .append = buffer->append,
2393 .buffer_size = buffer->num_samples,
2394 .sample_rate = buffer->sample_rate,
2395 .samples_consumed = &samples_consumed,
2396 .buffer = samples,
2397 },
2398 };
2399
2400 // Set the output.
2401 xrt_device_set_output(xdev, name, &value);
2402
2403 xret = ipc_send(imc, &samples_consumed, sizeof samples_consumed);
2404 if (xret != XRT_SUCCESS) {
2405 IPC_ERROR(ics->server, "Failed to send samples consumed");
2406 goto set_haptic_output_end;
2407 }
2408
2409 xret = XRT_SUCCESS;
2410
2411set_haptic_output_end:
2412 os_mutex_unlock(&ics->server->global_state.lock);
2413
2414 free(samples);
2415
2416 return xret;
2417}
2418
2419xrt_result_t
2420ipc_handle_device_get_output_limits(volatile struct ipc_client_state *ics,
2421 uint32_t id,
2422 struct xrt_output_limits *limits)
2423{
2424 // To make the code a bit more readable.
2425 uint32_t device_id = id;
2426 struct xrt_device *xdev = NULL;
2427 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2428
2429 // Set the output.
2430 return xrt_device_get_output_limits(xdev, limits);
2431}
2432
2433xrt_result_t
2434ipc_handle_device_get_visibility_mask(volatile struct ipc_client_state *ics,
2435 uint32_t device_id,
2436 enum xrt_visibility_mask_type type,
2437 uint32_t view_index)
2438{
2439 struct ipc_message_channel *imc = (struct ipc_message_channel *)&ics->imc;
2440 struct ipc_device_get_visibility_mask_reply reply = XRT_STRUCT_INIT;
2441 struct ipc_server *s = ics->server;
2442 xrt_result_t xret;
2443
2444 // @todo verify
2445 struct xrt_device *xdev = NULL;
2446 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2447 struct xrt_visibility_mask *mask = NULL;
2448 if (xdev->get_visibility_mask) {
2449 xret = xrt_device_get_visibility_mask(xdev, type, view_index, &mask);
2450 if (xret != XRT_SUCCESS) {
2451 IPC_ERROR(s, "Failed to get visibility mask");
2452 return xret;
2453 }
2454 } else {
2455 struct xrt_fov fov = xdev->hmd->distortion.fov[view_index];
2456 u_visibility_mask_get_default(type, &fov, &mask);
2457 }
2458
2459 if (mask == NULL) {
2460 IPC_ERROR(s, "Failed to get visibility mask");
2461 reply.mask_size = 0;
2462 } else {
2463 reply.mask_size = xrt_visibility_mask_get_size(mask);
2464 }
2465
2466 xret = ipc_send(imc, &reply, sizeof(reply));
2467 if (xret != XRT_SUCCESS) {
2468 IPC_ERROR(s, "Failed to send reply");
2469 goto out_free;
2470 }
2471
2472 xret = ipc_send(imc, mask, reply.mask_size);
2473 if (xret != XRT_SUCCESS) {
2474 IPC_ERROR(s, "Failed to send mask");
2475 goto out_free;
2476 }
2477
2478out_free:
2479 free(mask);
2480 return xret;
2481}
2482
2483xrt_result_t
2484ipc_handle_device_is_form_factor_available(volatile struct ipc_client_state *ics,
2485 uint32_t id,
2486 enum xrt_form_factor form_factor,
2487 bool *out_available)
2488{
2489 // To make the code a bit more readable.
2490 uint32_t device_id = id;
2491 struct xrt_device *xdev = NULL;
2492 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2493 *out_available = xrt_device_is_form_factor_available(xdev, form_factor);
2494 return XRT_SUCCESS;
2495}
2496
2497xrt_result_t
2498ipc_handle_system_devices_get_roles(volatile struct ipc_client_state *ics, struct xrt_system_roles *out_roles)
2499{
2500 return xrt_system_devices_get_roles(ics->server->xsysd, out_roles);
2501}
2502
2503xrt_result_t
2504ipc_handle_system_devices_begin_feature(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
2505{
2506 struct xrt_system_devices *xsysd = ics->server->xsysd;
2507 xrt_result_t xret;
2508
2509 xret = validate_device_feature_type(ics, type);
2510 if (xret != XRT_SUCCESS) {
2511 return XRT_ERROR_IPC_FAILURE;
2512 }
2513
2514 // Is this feature already used?
2515 if (ics->device_feature_used[type]) {
2516 IPC_ERROR(ics->server, "feature '%u' already used!", type);
2517 return XRT_ERROR_IPC_FAILURE;
2518 }
2519
2520 xret = xrt_system_devices_feature_inc(xsysd, type);
2521 if (xret != XRT_SUCCESS) {
2522 IPC_ERROR(ics->server, "xrt_system_devices_feature_inc failed");
2523 return xret;
2524 }
2525
2526 // Can now mark it as used.
2527 ics->device_feature_used[type] = true;
2528
2529 return XRT_SUCCESS;
2530}
2531
2532xrt_result_t
2533ipc_handle_system_devices_end_feature(volatile struct ipc_client_state *ics, enum xrt_device_feature_type type)
2534{
2535 struct xrt_system_devices *xsysd = ics->server->xsysd;
2536 xrt_result_t xret;
2537
2538 xret = validate_device_feature_type(ics, type);
2539 if (xret != XRT_SUCCESS) {
2540 return XRT_ERROR_IPC_FAILURE;
2541 }
2542
2543 if (!ics->device_feature_used[type]) {
2544 IPC_ERROR(ics->server, "feature '%u' not used!", type);
2545 return XRT_ERROR_IPC_FAILURE;
2546 }
2547
2548 xret = xrt_system_devices_feature_dec(xsysd, type);
2549 if (xret != XRT_SUCCESS) {
2550 IPC_ERROR(ics->server, "xrt_system_devices_feature_dec failed");
2551 return xret;
2552 }
2553
2554 // Now we can mark it as not used.
2555 ics->device_feature_used[type] = false;
2556
2557 return XRT_SUCCESS;
2558}
2559
2560xrt_result_t
2561ipc_handle_device_get_face_tracking(volatile struct ipc_client_state *ics,
2562 uint32_t id,
2563 enum xrt_input_name facial_expression_type,
2564 int64_t at_timestamp_ns,
2565 struct xrt_facial_expression_set *out_value)
2566{
2567 const uint32_t device_id = id;
2568 struct xrt_device *xdev = NULL;
2569 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2570 // Get facial expression data.
2571 return xrt_device_get_face_tracking(xdev, facial_expression_type, at_timestamp_ns, out_value);
2572}
2573
2574xrt_result_t
2575ipc_handle_device_device_get_face_calibration_state_android(volatile struct ipc_client_state *ics,
2576 uint32_t id,
2577 bool *out_face_is_calibrated)
2578{
2579 const uint32_t device_id = id;
2580 struct xrt_device *xdev = NULL;
2581 GET_XDEV_OR_RETURN(ics, device_id, xdev);
2582 return xrt_device_get_face_calibration_state_android(xdev, out_face_is_calibrated);
2583}
2584
2585xrt_result_t
2586ipc_handle_device_get_body_skeleton(volatile struct ipc_client_state *ics,
2587 uint32_t id,
2588 enum xrt_input_name body_tracking_type,
2589 struct xrt_body_skeleton *out_value)
2590{
2591 struct xrt_device *xdev = NULL;
2592 GET_XDEV_OR_RETURN(ics, id, xdev);
2593 return xrt_device_get_body_skeleton(xdev, body_tracking_type, out_value);
2594}
2595
2596xrt_result_t
2597ipc_handle_device_get_body_joints(volatile struct ipc_client_state *ics,
2598 uint32_t id,
2599 enum xrt_input_name body_tracking_type,
2600 int64_t desired_timestamp_ns,
2601 struct xrt_body_joint_set *out_value)
2602{
2603 struct xrt_device *xdev = NULL;
2604 GET_XDEV_OR_RETURN(ics, id, xdev);
2605 return xrt_device_get_body_joints(xdev, body_tracking_type, desired_timestamp_ns, out_value);
2606}
2607
2608xrt_result_t
2609ipc_handle_device_reset_body_tracking_calibration_meta(volatile struct ipc_client_state *ics, uint32_t id)
2610{
2611 struct xrt_device *xdev = get_xdev(ics, id);
2612 return xrt_device_reset_body_tracking_calibration_meta(xdev);
2613}
2614
2615xrt_result_t
2616ipc_handle_device_set_body_tracking_calibration_override_meta(volatile struct ipc_client_state *ics,
2617 uint32_t id,
2618 float new_body_height)
2619{
2620 struct xrt_device *xdev = get_xdev(ics, id);
2621 return xrt_device_set_body_tracking_calibration_override_meta(xdev, new_body_height);
2622}
2623
2624xrt_result_t
2625ipc_handle_device_get_battery_status(
2626 volatile struct ipc_client_state *ics, uint32_t id, bool *out_present, bool *out_charging, float *out_charge)
2627{
2628 struct xrt_device *xdev = NULL;
2629 GET_XDEV_OR_RETURN(ics, id, xdev);
2630 return xrt_device_get_battery_status(xdev, out_present, out_charging, out_charge);
2631}
2632
2633xrt_result_t
2634ipc_handle_device_get_brightness(volatile struct ipc_client_state *ics, uint32_t id, float *out_brightness)
2635{
2636 struct xrt_device *xdev = NULL;
2637 GET_XDEV_OR_RETURN(ics, id, xdev);
2638
2639 if (!xdev->supported.brightness_control) {
2640 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
2641 }
2642
2643 return xrt_device_get_brightness(xdev, out_brightness);
2644}
2645
2646xrt_result_t
2647ipc_handle_device_set_brightness(volatile struct ipc_client_state *ics, uint32_t id, float brightness, bool relative)
2648{
2649 struct xrt_device *xdev = NULL;
2650 GET_XDEV_OR_RETURN(ics, id, xdev);
2651
2652 if (!xdev->supported.brightness_control) {
2653 return XRT_ERROR_FEATURE_NOT_SUPPORTED;
2654 }
2655
2656 return xrt_device_set_brightness(xdev, brightness, relative);
2657}
2658
2659xrt_result_t
2660ipc_handle_future_get_state(volatile struct ipc_client_state *ics, uint32_t future_id, enum xrt_future_state *out_state)
2661{
2662 struct xrt_future *xft = NULL;
2663 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2664 if (xret != XRT_SUCCESS) {
2665 return xret;
2666 }
2667 return xrt_future_get_state(xft, out_state);
2668}
2669
2670xrt_result_t
2671ipc_handle_future_get_result(volatile struct ipc_client_state *ics,
2672 uint32_t future_id,
2673 struct xrt_future_result *out_ft_result)
2674{
2675 struct xrt_future *xft = NULL;
2676 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2677 if (xret != XRT_SUCCESS) {
2678 return xret;
2679 }
2680 return xrt_future_get_result(xft, out_ft_result);
2681}
2682
2683xrt_result_t
2684ipc_handle_future_cancel(volatile struct ipc_client_state *ics, uint32_t future_id)
2685{
2686 struct xrt_future *xft = NULL;
2687 xrt_result_t xret = validate_future_id(ics, future_id, &xft);
2688 if (xret != XRT_SUCCESS) {
2689 return xret;
2690 }
2691 return xrt_future_cancel(xft);
2692}
2693
2694xrt_result_t
2695ipc_handle_future_destroy(volatile struct ipc_client_state *ics, uint32_t future_id)
2696{
2697 return release_future(ics, future_id);
2698}