// The open source OpenXR runtime.
1// Copyright 2023, Collabora, Ltd.
2// Copyright 2025, NVIDIA CORPORATION.
3// SPDX-License-Identifier: BSL-1.0
/*!
 * @file
 * @brief An implementation of the @ref xrt_space_overseer interface.
 *
 * @author Jakob Bornecrantz <jakob@collabora.com>
 * @ingroup aux_util
 */
11
12#include "xrt/xrt_space.h"
13#include "xrt/xrt_device.h"
14#include "xrt/xrt_session.h"
15#include "xrt/xrt_tracking.h"
16
17#include "os/os_time.h"
18
19#include "math/m_space.h"
20
21#include "util/u_misc.h"
22#include "util/u_hashmap.h"
23#include "util/u_logging.h"
24#include "util/u_space_overseer.h"
25
26#include <assert.h>
27#include <math.h>
28#include <pthread.h>
29
30
31/*
32 *
33 * Structs and defines.
34 *
35 */
36
37/*!
38 * Keeps track of what kind of space it is.
39 */
40enum u_space_type
41{
42 U_SPACE_TYPE_NULL,
43 U_SPACE_TYPE_POSE,
44 U_SPACE_TYPE_OFFSET,
45 U_SPACE_TYPE_ROOT,
46
47 /*!
48 * Space designed to be attachable to others, most importantly it is
49 * re-attachable, and in order to move all of the spaces that has this
50 * space as it's parent/next we need a node that can be updated.
51 */
52 U_SPACE_TYPE_ATTACHABLE,
53};
54
55/*!
56 * Representing a single space, can be several ones. There should only be one
57 * root space per overseer.
58 */
59struct u_space
60{
61 struct xrt_space base;
62
63 /*!
64 * The space this space is in.
65 */
66 struct u_space *next;
67
68 /*!
69 * The type of the space.
70 */
71 enum u_space_type type;
72
73 union {
74 struct
75 {
76 struct xrt_device *xdev;
77 enum xrt_input_name xname;
78 } pose;
79
80 struct
81 {
82 struct xrt_pose pose;
83 } offset;
84 };
85};
86
87/*!
88 * Default implementation of the xrt_space_overseer object.
89 */
90struct u_space_overseer
91{
92 struct xrt_space_overseer base;
93
94 //! Main graph lock.
95 pthread_rwlock_t lock;
96
97 //! Map from xdev to space, each entry holds a reference.
98 struct u_hashmap_int *xdev_map;
99
100 //! Map from xrt_tracking_origin to space, each entry holds a reference.
101 struct u_hashmap_int *xto_map;
102
103 //! Tracks usage of reference spaces.
104 struct xrt_reference ref_space_use[XRT_SPACE_REFERENCE_TYPE_COUNT];
105
106 //! Event sink to broadcast events to all sessions.
107 struct xrt_session_event_sink *broadcast;
108
109 /*!
110 * The notify device, usually the head device. Used to notify when
111 * reference spaces are used and not used. Must not change during
112 * runtime.
113 */
114 struct xrt_device *notify;
115
116 /*!
117 * Can we do a recenter of the local and local_floor spaces, protected
118 * by the lock.
119 *
120 * This requires that local and local_floor are either null or offset
121 * spaces and that they share the same parent.
122 */
123 bool can_do_local_spaces_recenter;
124
125 /*!
126 * Create independent local and local_floor per application
127 */
128 bool per_app_local_spaces;
129};
130
131
132/*
133 *
134 * Helper functions.
135 *
136 */
137
138static inline struct u_space *
139u_space(struct xrt_space *xs)
140{
141 return (struct u_space *)xs;
142}
143
144static inline struct u_space_overseer *
145u_space_overseer(struct xrt_space_overseer *xso)
146{
147 return (struct u_space_overseer *)xso;
148}
149
150static const char *
151type_to_small_string(enum xrt_reference_space_type type)
152{
153 switch (type) {
154 case XRT_SPACE_REFERENCE_TYPE_VIEW: return "view";
155 case XRT_SPACE_REFERENCE_TYPE_LOCAL: return "local";
156 case XRT_SPACE_REFERENCE_TYPE_LOCAL_FLOOR: return "local_floor";
157 case XRT_SPACE_REFERENCE_TYPE_STAGE: return "stage";
158 case XRT_SPACE_REFERENCE_TYPE_UNBOUNDED: return "unbounded";
159 }
160
161 return "invalid";
162}
163
164static struct u_space *
165get_semantic_space(struct u_space_overseer *uso, enum xrt_reference_space_type type)
166{
167 switch (type) {
168 case XRT_SPACE_REFERENCE_TYPE_VIEW: return u_space(uso->base.semantic.view);
169 case XRT_SPACE_REFERENCE_TYPE_LOCAL: return u_space(uso->base.semantic.local);
170 case XRT_SPACE_REFERENCE_TYPE_LOCAL_FLOOR: return u_space(uso->base.semantic.local_floor);
171 case XRT_SPACE_REFERENCE_TYPE_STAGE: return u_space(uso->base.semantic.stage);
172 case XRT_SPACE_REFERENCE_TYPE_UNBOUNDED: return u_space(uso->base.semantic.unbounded);
173 }
174
175 return NULL;
176}
177
178/*!
179 * A lot of code here uses u_space directly and need to change reference count
180 * so this helper is here to make that easier.
181 */
182static inline void
183u_space_reference(struct u_space **dst, struct u_space *src)
184{
185 struct u_space *old_dst = *dst;
186
187 if (old_dst == src) {
188 return;
189 }
190
191 if (src) {
192 xrt_reference_inc(&src->base.reference);
193 }
194
195 *dst = src;
196
197 if (old_dst) {
198 if (xrt_reference_dec_and_is_zero(&old_dst->base.reference)) {
199 old_dst->base.destroy(&old_dst->base);
200 }
201 }
202}
203
204/*!
205 * Helper function when clearing a hashmap to also unreference a space.
206 */
207static void
208hashmap_unreference_space_items(void *item, void *priv)
209{
210 struct u_space *us = (struct u_space *)item;
211 u_space_reference(&us, NULL);
212}
213
214static struct u_space *
215find_xdev_space_read_locked(struct u_space_overseer *uso, struct xrt_device *xdev)
216{
217 void *ptr = NULL;
218 uint64_t key = (uint64_t)(intptr_t)xdev;
219 u_hashmap_int_find(uso->xdev_map, key, &ptr);
220
221 if (ptr == NULL) {
222 U_LOG_E("Looking for space belonging to unknown xrt_device! '%s'", xdev->str);
223 }
224 assert(ptr != NULL);
225
226 return (struct u_space *)ptr;
227}
228
229static struct u_space *
230find_xto_space_read_locked(struct u_space_overseer *uso, struct xrt_tracking_origin *xto)
231{
232 void *ptr = NULL;
233 uint64_t key = (uint64_t)(intptr_t)xto;
234 u_hashmap_int_find(uso->xto_map, key, &ptr);
235
236 if (ptr == NULL) {
237 U_LOG_E("Looking for space belonging to unknown xrt_tracking_origin! '%s'", xto->name);
238 }
239 assert(ptr != NULL);
240
241 return (struct u_space *)ptr;
242}
243
244static bool
245space_is_offset_compatible(struct u_space *us)
246{
247 return us != NULL && (us->type == U_SPACE_TYPE_NULL || us->type == U_SPACE_TYPE_OFFSET);
248}
249
250/*!
251 * Updates the offset of a NULL or OFFSET space.
252 */
253static void
254update_offset_write_locked(struct u_space *us, const struct xrt_pose *new_offset)
255{
256 assert(us->type == U_SPACE_TYPE_NULL || us->type == U_SPACE_TYPE_OFFSET);
257
258 if (m_pose_is_identity(new_offset)) { // Small optimisation.
259 us->type = U_SPACE_TYPE_NULL;
260 U_ZERO(&us->offset.pose);
261 } else {
262 us->type = U_SPACE_TYPE_OFFSET;
263 us->offset.pose = *new_offset;
264 }
265}
266
267/*!
268 * Returns the offset for an offset space or an identity pose, it's valid to
269 * call on all spaces.
270 */
271static void
272get_offset_or_ident_read_locked(const struct u_space *us, struct xrt_pose *offset)
273{
274 if (us->type == U_SPACE_TYPE_OFFSET) {
275 *offset = us->offset.pose;
276 } else {
277 *offset = (struct xrt_pose)XRT_POSE_IDENTITY;
278 }
279}
280
281
282/*
283 *
284 * Reference space to device notification code.
285 *
286 */
287
288static void
289notify_ref_space_usage_device(struct u_space_overseer *uso, enum xrt_reference_space_type type, bool used)
290{
291 struct xrt_device *xdev = NULL;
292 enum xrt_input_name name = 0;
293
294 struct u_space *uspace = get_semantic_space(uso, type);
295 if (uspace == NULL) {
296 // This is weird, should always be a space.
297 return;
298 }
299
300 if (uspace->type == U_SPACE_TYPE_POSE) {
301 xdev = uspace->pose.xdev;
302 name = uspace->pose.xname;
303 } else {
304 xdev = uso->notify;
305 }
306
307 if (xdev == NULL || !xdev->supported.ref_space_usage) {
308 return;
309 }
310
311 xrt_device_ref_space_usage(xdev, type, name, used);
312}
313
314
315/*
316 *
317 * Graph traversing functions.
318 *
319 */
320
321/*!
322 * For each space, push the relation of that space and then traverse by calling
323 * @p push_then_traverse again with the parent space. That means traverse goes
324 * from a leaf space to a the root space, relations are pushed in the same
325 * order.
326 */
327static void
328push_then_traverse(struct xrt_relation_chain *xrc, struct u_space *space, int64_t at_timestamp_ns)
329{
330 switch (space->type) {
331 case U_SPACE_TYPE_NULL: break; // No-op
332 case U_SPACE_TYPE_POSE: {
333 assert(space->pose.xdev != NULL);
334 assert(space->pose.xname != 0);
335
336 struct xrt_space_relation xsr;
337 xrt_device_get_tracked_pose(space->pose.xdev, space->pose.xname, at_timestamp_ns, &xsr);
338 m_relation_chain_push_relation(xrc, &xsr);
339 } break;
340 case U_SPACE_TYPE_OFFSET: m_relation_chain_push_pose_if_not_identity(xrc, &space->offset.pose); break;
341 case U_SPACE_TYPE_ROOT: return; // Stops the traversing.
342 case U_SPACE_TYPE_ATTACHABLE: break; // No-op
343 }
344
345 // Please tail-call optimise this miss compiler.
346 assert(space->next != NULL);
347 push_then_traverse(xrc, space->next, at_timestamp_ns);
348}
349
350/*!
351 * For each space, traverse by calling @p traverse_then_push_inverse again with
352 * the parent space then push the inverse of the relation of that. That means
353 * traverse goes from a leaf space to a the root space, relations are pushed in
354 * the reversed order.
355 */
356static void
357traverse_then_push_inverse(struct xrt_relation_chain *xrc, struct u_space *space, int64_t at_timestamp_ns)
358{
359 // Done traversing.
360 switch (space->type) {
361 case U_SPACE_TYPE_NULL: break;
362 case U_SPACE_TYPE_POSE: break;
363 case U_SPACE_TYPE_OFFSET: break;
364 case U_SPACE_TYPE_ROOT: return; // Stops the traversing.
365 case U_SPACE_TYPE_ATTACHABLE: break; // No-op
366 }
367
368 // Can't tail-call optimise this one :(
369 assert(space->next != NULL);
370 traverse_then_push_inverse(xrc, space->next, at_timestamp_ns);
371
372 switch (space->type) {
373 case U_SPACE_TYPE_NULL: break; // No-op
374 case U_SPACE_TYPE_POSE: {
375 assert(space->pose.xdev != NULL);
376 assert(space->pose.xname != 0);
377
378 struct xrt_space_relation xsr;
379 xrt_device_get_tracked_pose(space->pose.xdev, space->pose.xname, at_timestamp_ns, &xsr);
380 m_relation_chain_push_inverted_relation(xrc, &xsr);
381 } break;
382 case U_SPACE_TYPE_OFFSET: m_relation_chain_push_inverted_pose_if_not_identity(xrc, &space->offset.pose); break;
383 case U_SPACE_TYPE_ROOT: assert(false); // Should not get here.
384 case U_SPACE_TYPE_ATTACHABLE: break; // No-op
385 }
386}
387
388static void
389build_relation_chain_read_locked(struct u_space_overseer *uso,
390 struct xrt_relation_chain *xrc,
391 struct u_space *base,
392 struct u_space *target,
393 int64_t at_timestamp_ns)
394{
395 assert(xrc != NULL);
396 assert(base != NULL);
397 assert(target != NULL);
398
399 push_then_traverse(xrc, target, at_timestamp_ns);
400 traverse_then_push_inverse(xrc, base, at_timestamp_ns);
401}
402
403static void
404build_relation_chain(struct u_space_overseer *uso,
405 struct xrt_relation_chain *xrc,
406 struct u_space *base,
407 struct u_space *target,
408 int64_t at_timestamp_ns)
409{
410 pthread_rwlock_rdlock(&uso->lock);
411 build_relation_chain_read_locked(uso, xrc, base, target, at_timestamp_ns);
412 pthread_rwlock_unlock(&uso->lock);
413}
414
415static inline void
416special_resolve(struct xrt_relation_chain *xrc, struct xrt_space_relation *out_relation)
417{
418 // A space chain with zero step is always valid.
419 if (xrc->step_count == 0) {
420 out_relation->pose = (struct xrt_pose)XRT_POSE_IDENTITY;
421 out_relation->linear_velocity = (struct xrt_vec3)XRT_VEC3_ZERO;
422 out_relation->angular_velocity = (struct xrt_vec3)XRT_VEC3_ZERO;
423 out_relation->relation_flags = //
424 XRT_SPACE_RELATION_ORIENTATION_VALID_BIT | //
425 XRT_SPACE_RELATION_ORIENTATION_TRACKED_BIT | //
426 XRT_SPACE_RELATION_POSITION_VALID_BIT | //
427 XRT_SPACE_RELATION_POSITION_TRACKED_BIT | //
428 XRT_SPACE_RELATION_LINEAR_VELOCITY_VALID_BIT | //
429 XRT_SPACE_RELATION_ANGULAR_VELOCITY_VALID_BIT;
430 } else {
431 m_relation_chain_resolve(xrc, out_relation);
432 }
433}
434
435
436/*
437 *
438 * Direct space functions.
439 *
440 */
441
442static void
443space_destroy(struct xrt_space *xs)
444{
445 struct u_space *us = u_space(xs);
446
447 assert(us->next != NULL || us->type == U_SPACE_TYPE_ROOT);
448
449 u_space_reference(&us->next, NULL);
450
451 free(us);
452}
453
454/*!
455 * Creates a space, returns with a reference of one. The lock doesn't need to be
456 * held as this function is not modifying any of the currently existing spaces
457 * only creating a new one that is not pointed to by any other spaces.
458 */
459static struct u_space *
460create_space(enum u_space_type type, struct u_space *parent)
461{
462 assert(parent != NULL || type == U_SPACE_TYPE_ROOT);
463
464 struct u_space *us = U_TYPED_CALLOC(struct u_space);
465 us->base.reference.count = 1;
466 us->base.destroy = space_destroy;
467 us->type = type;
468
469 u_space_reference(&us->next, parent);
470
471 return us;
472}
473
474static void
475create_and_set_root_space(struct u_space_overseer *uso)
476{
477 assert(uso->base.semantic.root == NULL);
478
479 struct u_space *us = create_space(U_SPACE_TYPE_ROOT, NULL);
480
481 // Created with one reference.
482 uso->base.semantic.root = &us->base;
483}
484
485
486/*
487 *
488 * Device helpers.
489 *
490 */
491
492/*!
493 * Helper function to add a device to the space overseer. This function
494 * handles creating or finding a space for the device's tracking origin
495 * and linking the device to that space.
496 */
497static xrt_result_t
498add_device_helper(struct u_space_overseer *uso, struct xrt_device *xdev)
499{
500 struct xrt_tracking_origin *torig = xdev->tracking_origin;
501 assert(torig != NULL);
502
503 struct xrt_space *root = uso->base.semantic.root;
504 uint64_t key = (uint64_t)(intptr_t)torig;
505 struct xrt_space *xs = NULL;
506
507 // Need to take the write lock.
508 pthread_rwlock_wrlock(&uso->lock);
509
510 // Does this tracking origin already have space.
511 void *ptr = NULL;
512 u_hashmap_int_find(uso->xto_map, key, &ptr);
513
514 if (ptr != NULL) {
515 xs = (struct xrt_space *)ptr;
516 } else if (torig->type == XRT_TRACKING_TYPE_ATTACHABLE) {
517 /*
518 * If we ever make u_space_overseer sub-classable make sure
519 * this calls the right function, can't call interface function
520 * as the lock is held here.
521 */
522 xs = (struct xrt_space *)create_space(U_SPACE_TYPE_ATTACHABLE, u_space(root));
523 u_hashmap_int_insert(uso->xto_map, key, xs);
524 } else {
525 /*
526 * If we ever make u_space_overseer sub-classable make sure
527 * this calls the right function, can't call interface function
528 * as the lock is held here.
529 */
530 xs = (struct xrt_space *)create_space(U_SPACE_TYPE_OFFSET, u_space(root));
531
532 update_offset_write_locked(u_space(xs), &torig->initial_offset);
533
534 u_hashmap_int_insert(uso->xto_map, key, xs);
535 }
536
537 pthread_rwlock_unlock(&uso->lock);
538
539 u_space_overseer_link_space_to_device(uso, xs, xdev);
540
541 return XRT_SUCCESS;
542}
543
544
545/*
546 *
547 * Member functions.
548 *
549 */
550
551static xrt_result_t
552create_offset_space(struct xrt_space_overseer *xso,
553 struct xrt_space *parent,
554 const struct xrt_pose *offset,
555 struct xrt_space **out_space)
556{
557 assert(out_space != NULL);
558 assert(*out_space == NULL);
559
560 struct u_space *uparent = u_space(parent);
561 struct u_space *us = NULL;
562
563 if (m_pose_is_identity(offset)) { // Small optimisation.
564 us = create_space(U_SPACE_TYPE_NULL, uparent);
565 } else {
566 us = create_space(U_SPACE_TYPE_OFFSET, uparent);
567 us->offset.pose = *offset;
568 }
569
570 // Created with one references.
571 *out_space = &us->base;
572
573 return XRT_SUCCESS;
574}
575
576static xrt_result_t
577create_pose_space(struct xrt_space_overseer *xso,
578 struct xrt_device *xdev,
579 enum xrt_input_name name,
580 struct xrt_space **out_space)
581{
582 assert(out_space != NULL);
583 assert(*out_space == NULL);
584
585 struct u_space_overseer *uso = u_space_overseer(xso);
586
587 // Only need the read lock.
588 pthread_rwlock_rdlock(&uso->lock);
589
590 struct u_space *uparent = find_xdev_space_read_locked(uso, xdev);
591 struct u_space *us = create_space(U_SPACE_TYPE_POSE, uparent);
592
593 // Safe to unlock now.
594 pthread_rwlock_unlock(&uso->lock);
595
596 us->pose.xdev = xdev;
597 us->pose.xname = name;
598
599 // Created with one references.
600 *out_space = &us->base;
601
602 return XRT_SUCCESS;
603}
604
605static xrt_result_t
606locate_space(struct xrt_space_overseer *xso,
607 struct xrt_space *base_space,
608 const struct xrt_pose *base_offset,
609 int64_t at_timestamp_ns,
610 struct xrt_space *space,
611 const struct xrt_pose *offset,
612 struct xrt_space_relation *out_relation)
613{
614 struct u_space_overseer *uso = u_space_overseer(xso);
615
616 struct u_space *ubase_space = u_space(base_space);
617 struct u_space *uspace = u_space(space);
618
619 struct xrt_relation_chain xrc = {0};
620
621 m_relation_chain_push_pose_if_not_identity(&xrc, offset);
622
623 // crude optimization: If locating a space in itself, we don't actually need to locate the space itself.
624 // only the offsets need to be applied.
625 if (uspace != ubase_space) {
626 build_relation_chain(uso, &xrc, ubase_space, uspace, at_timestamp_ns);
627 }
628
629 m_relation_chain_push_inverted_pose_if_not_identity(&xrc, base_offset);
630
631 // For base_space =~= space (approx equals).
632 special_resolve(&xrc, out_relation);
633
634 return XRT_SUCCESS;
635}
636
637static bool
638pose_approx(const struct xrt_pose *a, const struct xrt_pose *b)
639{
640 const float e = 0.00001f;
641 return fabsf(a->orientation.x - b->orientation.x) < e && //
642 fabsf(a->orientation.y - b->orientation.y) < e && //
643 fabsf(a->orientation.z - b->orientation.z) < e && //
644 fabsf(a->orientation.w - b->orientation.w) < e && //
645 fabsf(a->position.x - b->position.x) < e && //
646 fabsf(a->position.y - b->position.y) < e && //
647 fabsf(a->position.z - b->position.z) < e;
648}
649
650static int32_t
651find_same_space_before(struct xrt_space **spaces, const struct xrt_pose *offsets, uint32_t space_index)
652{
653 for (int32_t i = 0; i < (int32_t)space_index; i++) {
654 if (spaces[i] == spaces[space_index] && pose_approx(&offsets[i], &offsets[space_index])) {
655 return i;
656 }
657 }
658 return -1;
659}
660
661static xrt_result_t
662locate_spaces(struct xrt_space_overseer *xso,
663 struct xrt_space *base_space,
664 const struct xrt_pose *base_offset,
665 int64_t at_timestamp_ns,
666 struct xrt_space **spaces,
667 uint32_t space_count,
668 const struct xrt_pose *offsets,
669 struct xrt_space_relation *out_relations)
670{
671 struct u_space_overseer *uso = u_space_overseer(xso);
672
673 struct u_space *ubase_space = u_space(base_space);
674
675 for (uint32_t i = 0; i < space_count; i++) {
676 // spaces are allowed to be NULL
677 if (spaces[i] == NULL) {
678 out_relations[i].relation_flags = XRT_SPACE_RELATION_BITMASK_NONE;
679 continue;
680 }
681
682 // crude optimization: If space ptr is equal to one already located, don't locate again, just copy
683 {
684 int32_t found = find_same_space_before(spaces, offsets, i);
685 if (found >= 0) {
686 out_relations[i] = out_relations[found];
687 continue;
688 }
689 }
690
691 struct u_space *uspace = u_space(spaces[i]);
692 struct xrt_relation_chain xrc = {0};
693
694 m_relation_chain_push_pose_if_not_identity(&xrc, &offsets[i]);
695
696 // crude optimization: If locating a space in itself, we don't actually need to locate the space itself.
697 // only the offsets need to be applied.
698 if (spaces[i] != base_space) {
699 build_relation_chain(uso, &xrc, ubase_space, uspace, at_timestamp_ns);
700 }
701
702 m_relation_chain_push_inverted_pose_if_not_identity(&xrc, base_offset);
703
704 // For base_space =~= space (approx equals).
705 special_resolve(&xrc, &out_relations[i]);
706 }
707
708 return XRT_SUCCESS;
709}
710
711static xrt_result_t
712locate_device(struct xrt_space_overseer *xso,
713 struct xrt_space *base_space,
714 const struct xrt_pose *base_offset,
715 int64_t at_timestamp_ns,
716 struct xrt_device *xdev,
717 struct xrt_space_relation *out_relation)
718{
719 struct u_space_overseer *uso = u_space_overseer(xso);
720
721 struct u_space *ubase_space = u_space(base_space);
722
723 struct xrt_relation_chain xrc = {0};
724
725 // Only need the read lock.
726 pthread_rwlock_rdlock(&uso->lock);
727
728 struct u_space *uspace = find_xdev_space_read_locked(uso, xdev);
729 build_relation_chain_read_locked(uso, &xrc, ubase_space, uspace, at_timestamp_ns);
730
731 // Safe to unlock now.
732 pthread_rwlock_unlock(&uso->lock);
733
734 // Do as much work outside of the lock.
735 m_relation_chain_push_inverted_pose_if_not_identity(&xrc, base_offset);
736 special_resolve(&xrc, out_relation);
737
738 return XRT_SUCCESS;
739}
740
741static xrt_result_t
742ref_space_inc(struct xrt_space_overseer *xso, enum xrt_reference_space_type type)
743{
744 struct u_space_overseer *uso = u_space_overseer(xso);
745
746 // No more checking then this.
747 assert(type < XRT_SPACE_REFERENCE_TYPE_COUNT);
748
749 // If it wasn't zero nothing to do.
750 if (!xrt_reference_inc_and_was_zero(&uso->ref_space_use[type])) {
751 return XRT_SUCCESS;
752 }
753
754 U_LOG_D("Ref-space %s in use", type_to_small_string(type));
755
756
757 /*
758 * We have a reference space that was not in use but is now in used.
759 */
760
761 // Notify any device that might want to know about it.
762 notify_ref_space_usage_device(uso, type, true);
763
764 return XRT_SUCCESS;
765}
766
767static xrt_result_t
768ref_space_dec(struct xrt_space_overseer *xso, enum xrt_reference_space_type type)
769{
770 struct u_space_overseer *uso = u_space_overseer(xso);
771
772 // No more checking then this.
773 assert(type < XRT_SPACE_REFERENCE_TYPE_COUNT);
774
775 // If it is not zero we are done.
776 if (!xrt_reference_dec_and_is_zero(&uso->ref_space_use[type])) {
777 return XRT_SUCCESS;
778 }
779
780 U_LOG_D("Ref-space %s no longer in use", type_to_small_string(type));
781
782
783 /*
784 * We have a reference space that was in use but is no longer used.
785 */
786
787 // Notify any device that might want to know about it.
788 notify_ref_space_usage_device(uso, type, false);
789
790 return XRT_SUCCESS;
791}
792
793static xrt_result_t
794recenter_local_spaces(struct xrt_space_overseer *xso)
795{
796 struct u_space_overseer *uso = u_space_overseer(xso);
797 xrt_result_t xret;
798
799 // Take the full lock from the start.
800 pthread_rwlock_wrlock(&uso->lock);
801
802 // Can we do recentering, check with lock held.
803 if (uso->can_do_local_spaces_recenter) {
804 goto err_unlock;
805 }
806
807
808 /*
809 * We go from the view to the parent of local/local_view,
810 * they must share the same parent.
811 */
812
813 uint64_t new_ns = os_monotonic_get_ns();
814 struct u_space *uview = u_space(xso->semantic.view);
815 struct u_space *ulocal = u_space(xso->semantic.local);
816 struct u_space *ulocal_floor = u_space(xso->semantic.local_floor);
817 assert(uview != NULL);
818 assert(ulocal != NULL);
819 assert(ulocal_floor != NULL);
820
821 struct u_space *uparent = ulocal->next;
822 assert(uparent != NULL);
823 assert(uparent == ulocal_floor->next);
824
825
826 /*
827 * Get the offset of view in the parent space of local and local_floor.
828 */
829
830 struct xrt_relation_chain xrc = {0};
831 build_relation_chain_read_locked(uso, &xrc, uparent, uview, new_ns);
832
833 struct xrt_space_relation rel;
834 special_resolve(&xrc, &rel);
835
836 bool pos_valid = (rel.relation_flags & XRT_SPACE_RELATION_POSITION_VALID_BIT) != 0;
837 bool ori_valid = (rel.relation_flags & XRT_SPACE_RELATION_ORIENTATION_VALID_BIT) != 0;
838
839 if (!pos_valid || !ori_valid) {
840 goto err_unlock;
841 }
842
843
844 /*
845 * Calculate new offsets for the spaces.
846 */
847
848 // Only save the rotation around y axis.
849 rel.pose.orientation.x = 0;
850 rel.pose.orientation.z = 0;
851 math_quat_normalize(&rel.pose.orientation);
852
853 struct xrt_pose local_offset, local_floor_offset;
854 get_offset_or_ident_read_locked(ulocal, &local_offset);
855 get_offset_or_ident_read_locked(ulocal_floor, &local_floor_offset);
856
857 // Take the "flat" rotations and apply to both.
858 local_offset.orientation = rel.pose.orientation;
859 local_floor_offset.orientation = rel.pose.orientation;
860
861 // Keep y offset the same.
862 local_offset.position.x = rel.pose.position.x;
863 local_offset.position.z = rel.pose.position.z;
864 local_floor_offset.position.x = rel.pose.position.x;
865 local_floor_offset.position.z = rel.pose.position.z;
866
867 // Update the offsets.
868 update_offset_write_locked(ulocal, &local_offset);
869 update_offset_write_locked(ulocal_floor, &local_floor_offset);
870
871 // Push the events.
872 union xrt_session_event xse = XRT_STRUCT_INIT;
873
874 // Basics
875 xse.ref_change.event_type = XRT_SESSION_EVENT_REFERENCE_SPACE_CHANGE_PENDING;
876 xse.ref_change.pose_valid = false;
877 xse.ref_change.pose_in_previous_space = (struct xrt_pose)XRT_POSE_IDENTITY;
878 xse.ref_change.timestamp_ns = os_monotonic_get_ns();
879
880 // Event for local space.
881 xse.ref_change.ref_type = XRT_SPACE_REFERENCE_TYPE_LOCAL;
882 xret = xrt_session_event_sink_push(uso->broadcast, &xse);
883 if (xret != XRT_SUCCESS) {
884 U_LOG_E("Failed to push event for LOCAL!");
885 }
886
887 // Event for local floor space.
888 xse.ref_change.ref_type = XRT_SPACE_REFERENCE_TYPE_LOCAL_FLOOR;
889 xret = xrt_session_event_sink_push(uso->broadcast, &xse);
890 if (xret != XRT_SUCCESS) {
891 U_LOG_E("Failed to push event LOCAL_FLOOR!");
892 }
893
894 pthread_rwlock_unlock(&uso->lock);
895
896 return XRT_SUCCESS;
897
898err_unlock:
899 pthread_rwlock_unlock(&uso->lock);
900
901 return XRT_ERROR_RECENTERING_NOT_SUPPORTED;
902}
903
904static xrt_result_t
905create_local_space(struct xrt_space_overseer *xso,
906 struct xrt_space **out_local_space,
907 struct xrt_space **out_local_floor_space)
908{
909 assert(xso->semantic.root != NULL);
910 struct u_space_overseer *uso = u_space_overseer(xso);
911 if (!uso->per_app_local_spaces) {
912 xrt_space_reference(out_local_space, xso->semantic.local);
913 xrt_space_reference(out_local_floor_space, xso->semantic.local_floor);
914 return XRT_SUCCESS;
915 }
916
917 struct xrt_pose identity = XRT_POSE_IDENTITY;
918 struct xrt_space_relation xsr = XRT_SPACE_RELATION_ZERO;
919 xrt_space_overseer_locate_space(xso, xso->semantic.root, &identity, os_monotonic_get_ns(), xso->semantic.view,
920 &identity, &xsr);
921
922 bool pos_valid = (xsr.relation_flags & XRT_SPACE_RELATION_POSITION_VALID_BIT) != 0;
923 bool ori_valid = (xsr.relation_flags & XRT_SPACE_RELATION_ORIENTATION_VALID_BIT) != 0;
924 if (!pos_valid || !ori_valid) {
925 xsr.pose = (struct xrt_pose)XRT_POSE_IDENTITY;
926 xsr.pose.position.y = 1.6;
927 }
928
929 xsr.pose.orientation.x = 0;
930 xsr.pose.orientation.z = 0;
931 math_quat_normalize(&xsr.pose.orientation);
932
933 xrt_result_t xret = XRT_SUCCESS;
934
935 if (out_local_space != NULL) {
936 xret = create_offset_space(xso, xso->semantic.root, &xsr.pose, out_local_space);
937 if (xret != XRT_SUCCESS) {
938 U_LOG_E("Failed to create offset space LOCAL!");
939 return xret;
940 }
941 }
942
943 if (out_local_floor_space != NULL) {
944 if (xso->semantic.stage != NULL) {
945 struct u_space *ustage = u_space(xso->semantic.stage);
946 xsr.pose.position.y = ustage->offset.pose.position.y;
947 } else {
948 xsr.pose.position.y = 0;
949 }
950 xret = create_offset_space(xso, xso->semantic.root, &xsr.pose, out_local_floor_space);
951 if (xret != XRT_SUCCESS) {
952 U_LOG_E("Failed to create offset space LOCAL_FLOOR!");
953 return xret;
954 }
955 }
956
957 return xret;
958}
959
960static xrt_result_t
961get_tracking_origin_offset(struct xrt_space_overseer *xso, struct xrt_tracking_origin *xto, struct xrt_pose *out_offset)
962{
963 struct u_space_overseer *uso = u_space_overseer(xso);
964 xrt_result_t xret = XRT_SUCCESS;
965
966 pthread_rwlock_rdlock(&uso->lock);
967
968 struct u_space *us = find_xto_space_read_locked(uso, xto);
969 if (!space_is_offset_compatible(us)) {
970 xret = XRT_ERROR_UNSUPPORTED_SPACE_TYPE;
971 goto unlock;
972 }
973
974 get_offset_or_ident_read_locked(us, out_offset);
975
976unlock:
977 pthread_rwlock_unlock(&uso->lock);
978 return xret;
979}
980
981static xrt_result_t
982set_tracking_origin_offset(struct xrt_space_overseer *xso,
983 struct xrt_tracking_origin *xto,
984 const struct xrt_pose *offset)
985{
986 struct u_space_overseer *uso = u_space_overseer(xso);
987 xrt_result_t xret = XRT_SUCCESS;
988
989 pthread_rwlock_rdlock(&uso->lock);
990
991 struct u_space *us = find_xto_space_read_locked(uso, xto);
992 if (!space_is_offset_compatible(us)) {
993 xret = XRT_ERROR_UNSUPPORTED_SPACE_TYPE;
994 goto unlock;
995 }
996
997 update_offset_write_locked(us, offset);
998
999unlock:
1000 pthread_rwlock_unlock(&uso->lock);
1001 return xret;
1002}
1003
1004static xrt_result_t
1005get_reference_space_offset(struct xrt_space_overseer *xso,
1006 enum xrt_reference_space_type type,
1007 struct xrt_pose *out_offset)
1008{
1009 struct u_space_overseer *uso = u_space_overseer(xso);
1010 xrt_result_t xret = XRT_SUCCESS;
1011
1012 pthread_rwlock_rdlock(&uso->lock);
1013
1014 struct u_space *us = get_semantic_space(uso, type);
1015 if (!space_is_offset_compatible(us)) {
1016 xret = XRT_ERROR_UNSUPPORTED_SPACE_TYPE;
1017 goto unlock;
1018 }
1019
1020 get_offset_or_ident_read_locked(us, out_offset);
1021
1022unlock:
1023 pthread_rwlock_unlock(&uso->lock);
1024 return xret;
1025}
1026
1027static xrt_result_t
1028set_reference_space_offset(struct xrt_space_overseer *xso,
1029 enum xrt_reference_space_type type,
1030 const struct xrt_pose *offset)
1031{
1032 if (type == XRT_SPACE_REFERENCE_TYPE_LOCAL_FLOOR) {
1033 // LOCAL_FLOOR is calculated from LOCAL and STAGE
1034 return XRT_ERROR_UNSUPPORTED_SPACE_TYPE;
1035 }
1036
1037 struct u_space_overseer *uso = u_space_overseer(xso);
1038
1039 if (uso->can_do_local_spaces_recenter) {
1040 return XRT_ERROR_RECENTERING_NOT_SUPPORTED;
1041 }
1042
1043 xrt_result_t xret = XRT_SUCCESS;
1044
1045 pthread_rwlock_wrlock(&uso->lock);
1046
1047 struct u_space *us = get_semantic_space(uso, type);
1048 if (!space_is_offset_compatible(us)) {
1049 xret = XRT_ERROR_UNSUPPORTED_SPACE_TYPE;
1050 goto unlock;
1051 }
1052
1053 // can_do_local_spaces_recenter ensures that local_floor can be offset
1054 struct u_space *ufloor = u_space(xso->semantic.local_floor);
1055 struct xrt_pose floor;
1056 get_offset_or_ident_read_locked(ufloor, &floor);
1057
1058 if (type == XRT_SPACE_REFERENCE_TYPE_STAGE) {
1059 floor.position.y = offset->position.y;
1060 } else if (type == XRT_SPACE_REFERENCE_TYPE_LOCAL) {
1061 floor.orientation = offset->orientation;
1062 floor.position.x = offset->position.x;
1063 floor.position.z = offset->position.z;
1064 }
1065
1066 update_offset_write_locked(us, offset);
1067 update_offset_write_locked(ufloor, &floor);
1068
1069 // Push the events.
1070 union xrt_session_event xse = XRT_STRUCT_INIT;
1071
1072 // Basics
1073 xse.ref_change.event_type = XRT_SESSION_EVENT_REFERENCE_SPACE_CHANGE_PENDING;
1074 xse.ref_change.pose_valid = false;
1075 xse.ref_change.pose_in_previous_space = (struct xrt_pose)XRT_POSE_IDENTITY;
1076 xse.ref_change.timestamp_ns = os_monotonic_get_ns();
1077
1078 if (type == XRT_SPACE_REFERENCE_TYPE_STAGE) {
1079 xse.ref_change.ref_type = XRT_SPACE_REFERENCE_TYPE_STAGE;
1080 xret = xrt_session_event_sink_push(uso->broadcast, &xse);
1081 if (xret != XRT_SUCCESS) {
1082 U_LOG_E("Failed to push event STAGE!");
1083 }
1084 } else if (type == XRT_SPACE_REFERENCE_TYPE_LOCAL) {
1085 xse.ref_change.ref_type = XRT_SPACE_REFERENCE_TYPE_LOCAL;
1086 xret = xrt_session_event_sink_push(uso->broadcast, &xse);
1087 if (xret != XRT_SUCCESS) {
1088 U_LOG_E("Failed to push event LOCAL!");
1089 }
1090 } else {
1091 // did not change STAGE or LOCAL -> LOCAL_FLOOR also did not change
1092 goto unlock;
1093 }
1094
1095 xse.ref_change.ref_type = XRT_SPACE_REFERENCE_TYPE_LOCAL_FLOOR;
1096 xret = xrt_session_event_sink_push(uso->broadcast, &xse);
1097 if (xret != XRT_SUCCESS) {
1098 U_LOG_E("Failed to push event LOCAL_FLOOR!");
1099 }
1100
1101unlock:
1102 pthread_rwlock_unlock(&uso->lock);
1103 return xret;
1104}
1105
1106static xrt_result_t
1107add_device(struct xrt_space_overseer *xso, struct xrt_device *xdev)
1108{
1109 struct u_space_overseer *uso = u_space_overseer(xso);
1110
1111 return add_device_helper(uso, xdev);
1112}
1113
1114static xrt_result_t
1115attach_device(struct xrt_space_overseer *xso, struct xrt_device *xdev, struct xrt_space *space)
1116{
1117 struct u_space_overseer *uso = u_space_overseer(xso);
1118
1119 // Check that the device has the correct tracking origin type.
1120 if (xdev->tracking_origin == NULL || xdev->tracking_origin->type != XRT_TRACKING_TYPE_ATTACHABLE) {
1121 U_LOG_E("Device '%s' does not have XRT_TRACKING_TYPE_ATTACHABLE tracking origin type", xdev->str);
1122 return XRT_ERROR_DEVICE_NOT_ATTACHABLE;
1123 }
1124
1125 // If no space is provided, use the root space.
1126 struct xrt_space *target_space = space;
1127 if (target_space == NULL) {
1128 target_space = uso->base.semantic.root;
1129 }
1130
1131 xrt_result_t xret = XRT_SUCCESS;
1132 pthread_rwlock_wrlock(&uso->lock);
1133
1134
1135 void *ptr = NULL;
1136 uint64_t key = (uint64_t)(intptr_t)xdev->tracking_origin;
1137 u_hashmap_int_find(uso->xto_map, key, &ptr);
1138 if (ptr == NULL) {
1139 U_LOG_E("Device doesn't have space associated with it!");
1140 xret = XRT_ERROR_DEVICE_NOT_ATTACHABLE;
1141 goto err_unlock;
1142 }
1143
1144 struct u_space *us = (struct u_space *)ptr;
1145 if (us->type != U_SPACE_TYPE_ATTACHABLE) {
1146 U_LOG_E("Device doesn't have a attachable space!");
1147 xret = XRT_ERROR_DEVICE_NOT_ATTACHABLE;
1148 goto err_unlock;
1149 }
1150
1151 // Update the link.
1152 u_space_reference(&us->next, u_space(target_space));
1153
1154err_unlock:
1155 pthread_rwlock_unlock(&uso->lock);
1156
1157 return xret;
1158}
1159
static void
destroy(struct xrt_space_overseer *xso)
{
	struct u_space_overseer *uso = u_space_overseer(xso);

	// Drop the overseer's own references to all semantic spaces first.
	xrt_space_reference(&uso->base.semantic.unbounded, NULL);
	xrt_space_reference(&uso->base.semantic.stage, NULL);
	xrt_space_reference(&uso->base.semantic.local, NULL);
	xrt_space_reference(&uso->base.semantic.local_floor, NULL);
	xrt_space_reference(&uso->base.semantic.view, NULL);
	xrt_space_reference(&uso->base.semantic.root, NULL);

	// Each map entry holds a reference to its space, make sure to unreference before destroying the map.
	u_hashmap_int_clear_and_call_for_each(uso->xdev_map, hashmap_unreference_space_items, uso);
	u_hashmap_int_destroy(&uso->xdev_map);

	// Same for the tracking-origin map.
	u_hashmap_int_clear_and_call_for_each(uso->xto_map, hashmap_unreference_space_items, uso);
	u_hashmap_int_destroy(&uso->xto_map);

	// Release any per-client local and local_floor spaces.
	for (int id = 0; id < XRT_MAX_CLIENT_SPACES; id++) {
		struct xrt_space **xslocal_ptr = (struct xrt_space **)&xso->localspace[id];
		xrt_space_reference(xslocal_ptr, NULL);

		struct xrt_space **xslocalfloor_ptr = (struct xrt_space **)&xso->localfloorspace[id];
		xrt_space_reference(xslocalfloor_ptr, NULL);
	}

	// All users of the lock are gone, safe to destroy it last.
	pthread_rwlock_destroy(&uso->lock);

	free(uso);
}
1191
1192
1193/*
1194 *
1195 * 'Exported' functions.
1196 *
1197 */
1198
1199struct u_space_overseer *
1200u_space_overseer_create(struct xrt_session_event_sink *broadcast)
1201{
1202 struct u_space_overseer *uso = U_TYPED_CALLOC(struct u_space_overseer);
1203 uso->base.create_local_space = create_local_space;
1204 uso->base.create_offset_space = create_offset_space;
1205 uso->base.create_pose_space = create_pose_space;
1206 uso->base.locate_space = locate_space;
1207 uso->base.locate_spaces = locate_spaces;
1208 uso->base.locate_device = locate_device;
1209 uso->base.ref_space_inc = ref_space_inc;
1210 uso->base.ref_space_dec = ref_space_dec;
1211 uso->base.recenter_local_spaces = recenter_local_spaces;
1212 uso->base.get_tracking_origin_offset = get_tracking_origin_offset;
1213 uso->base.set_tracking_origin_offset = set_tracking_origin_offset;
1214 uso->base.get_reference_space_offset = get_reference_space_offset;
1215 uso->base.set_reference_space_offset = set_reference_space_offset;
1216 uso->base.add_device = add_device;
1217 uso->base.attach_device = attach_device;
1218 uso->base.destroy = destroy;
1219 uso->broadcast = broadcast;
1220
1221 XRT_MAYBE_UNUSED int ret = 0;
1222
1223 ret = pthread_rwlock_init(&uso->lock, NULL);
1224 assert(ret == 0);
1225
1226 ret = u_hashmap_int_create(&uso->xdev_map);
1227 assert(ret == 0);
1228
1229 ret = u_hashmap_int_create(&uso->xto_map);
1230 assert(ret == 0);
1231
1232 create_and_set_root_space(uso);
1233
1234 return uso;
1235}
1236
void
u_space_overseer_legacy_setup(struct u_space_overseer *uso,
                              struct xrt_device **xdevs,
                              uint32_t xdev_count,
                              struct xrt_device *head,
                              const struct xrt_pose *local_offset,
                              bool root_is_unbounded,
                              bool per_app_local_spaces)
{
	uso->per_app_local_spaces = per_app_local_spaces;

	// Add all devices to the space overseer, failures are logged but not fatal.
	for (uint32_t i = 0; i < xdev_count; i++) {
		xrt_result_t xret = add_device_helper(uso, xdevs[i]);
		if (xret != XRT_SUCCESS) {
			U_LOG_E("Failed to add device '%s' to space overseer!", xdevs[i]->str);
		}
	}

	// If these are set something is probably wrong, but just in case unset them.
	assert(uso->base.semantic.view == NULL);
	assert(uso->base.semantic.stage == NULL);
	assert(uso->base.semantic.local == NULL);
	assert(uso->base.semantic.unbounded == NULL);
	xrt_space_reference(&uso->base.semantic.view, NULL);
	xrt_space_reference(&uso->base.semantic.stage, NULL);
	xrt_space_reference(&uso->base.semantic.local, NULL);
	xrt_space_reference(&uso->base.semantic.unbounded, NULL);

	if (head != NULL && head->supported.stage) {
		// stage poses are polled from the driver
		u_space_overseer_create_pose_space(uso, head, XRT_INPUT_GENERIC_STAGE_SPACE_POSE,
		                                   &uso->base.semantic.stage);
	} else {
		// stage offset is managed by space overseer
		u_space_overseer_create_null_space(uso, uso->base.semantic.root, &uso->base.semantic.stage);
	}

	// If the system wants to support the space, set root as unbounded.
	if (root_is_unbounded) {
		xrt_space_reference(&uso->base.semantic.unbounded, uso->base.semantic.root);
	}

	// Set local to the local offset.
	u_space_overseer_create_offset_space(uso, uso->base.semantic.root, local_offset, &uso->base.semantic.local);

	// Set local floor to be under local, but at y == 0 from stage.
	struct xrt_pose local_floor_offset = {
	    local_offset->orientation,
	    {local_offset->position.x, 0.f, local_offset->position.z},
	};

	u_space_overseer_create_offset_space(uso, uso->base.semantic.root, &local_floor_offset,
	                                     &uso->base.semantic.local_floor);

	// Setup view space if we have a head.
	if (head != NULL) {
		u_space_overseer_create_pose_space(uso, head, XRT_INPUT_GENERIC_HEAD_POSE, &uso->base.semantic.view);

		// Set the head to the notify device, for reference space usage.
		uso->notify = head;
	}
}
1300
1301void
1302u_space_overseer_create_null_space(struct u_space_overseer *uso, struct xrt_space *parent, struct xrt_space **out_space)
1303{
1304 assert(out_space != NULL);
1305 assert(*out_space == NULL);
1306
1307 struct u_space *uparent = u_space(parent);
1308 struct u_space *us = create_space(U_SPACE_TYPE_NULL, uparent);
1309
1310 // Created with one references.
1311 *out_space = &us->base;
1312}
1313
1314void
1315u_space_overseer_link_space_to_device(struct u_space_overseer *uso, struct xrt_space *xs, struct xrt_device *xdev)
1316{
1317 pthread_rwlock_wrlock(&uso->lock);
1318
1319 void *ptr = NULL;
1320 uint64_t key = (uint64_t)(intptr_t)xdev;
1321 u_hashmap_int_find(uso->xdev_map, key, &ptr);
1322 if (ptr != NULL) {
1323 U_LOG_W("Device '%s' already have a space attached!", xdev->str);
1324 }
1325
1326 // Each xdev needs to add a reference to the space.
1327 struct xrt_space *new_space = NULL;
1328 xrt_space_reference(&new_space, xs);
1329
1330 u_hashmap_int_insert(uso->xdev_map, (uint64_t)(intptr_t)xdev, new_space);
1331
1332 pthread_rwlock_unlock(&uso->lock);
1333
1334 // Dereferrence old space outside of lock.
1335 struct xrt_space *old_space = (struct xrt_space *)ptr;
1336 xrt_space_reference(&old_space, NULL);
1337}