// Copyright 2020-2021, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
 * @file
 * @brief Server mainloop details on Android.
 * @author Rylie Pavlik <rylie.pavlik@collabora.com>
 * @author Pete Black <pblack@collabora.com>
 * @author Jakob Bornecrantz <jakob@collabora.com>
 * @ingroup ipc_server
 */

#include "xrt/xrt_config_have.h"
#include "xrt/xrt_config_os.h"

#include "os/os_time.h"
#include "util/u_var.h"
#include "util/u_misc.h"
#include "util/u_debug.h"

#include "server/ipc_server.h"
#include "server/ipc_server_mainloop_android.h"

#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/epoll.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#define SHUTTING_DOWN (-1)

/*
 *
 * Static functions.
 *
 */

static int
init_pipe(struct ipc_server_mainloop *ml)
{
        int pipefd[2];
        int ret = pipe(pipefd);
        if (ret < 0) {
                U_LOG_E("pipe() failed '%i'", ret);
                return ret;
        }
        ml->pipe_read = pipefd[0];
        ml->pipe_write = pipefd[1];
        return 0;
}

static int
init_epoll(struct ipc_server_mainloop *ml)
{
        int ret = epoll_create1(EPOLL_CLOEXEC);
        if (ret < 0) {
                return ret;
        }

        pthread_mutex_init(&ml->client_push_mutex, NULL);
        pthread_cond_init(&ml->accept_cond, NULL);
        pthread_mutex_init(&ml->accept_mutex, NULL);
        ml->epoll_fd = ret;

        struct epoll_event ev = {0};

        ev.events = EPOLLIN;
        ev.data.fd = ml->pipe_read;
        ret = epoll_ctl(ml->epoll_fd, EPOLL_CTL_ADD, ml->pipe_read, &ev);
        if (ret < 0) {
                U_LOG_E("epoll_ctl(pipe_read) failed '%i'", ret);
                return ret;
        }

        return 0;
}

static void
handle_listen(struct ipc_server *vs, struct ipc_server_mainloop *ml)
{
        int newfd = 0;
        pthread_mutex_lock(&ml->accept_mutex);
        if (read(ml->pipe_read, &newfd, sizeof(newfd)) == sizeof(newfd)) {
                // client_push_mutex should prevent dropping acknowledgements
                assert(ml->last_accepted_fd == 0);

                // Release the thread that gave us this fd.
                ml->last_accepted_fd = newfd;

                // Call into the generic client connected handling code.
                ipc_server_handle_client_connected(vs, newfd);

                // If we are waiting to shut down, wake that thread up.
                pthread_cond_broadcast(&ml->accept_cond);
        } else {
                U_LOG_E("error on pipe read");
                ipc_server_handle_failure(vs);
        }
        pthread_mutex_unlock(&ml->accept_mutex);
}

#define NUM_POLL_EVENTS 8
#define NO_SLEEP 0

/*
 *
 * Exported functions
 *
 */

void
ipc_server_mainloop_poll(struct ipc_server *vs, struct ipc_server_mainloop *ml)
{
        int epoll_fd = ml->epoll_fd;

        struct epoll_event events[NUM_POLL_EVENTS] = {0};

        // No sleeping, returns immediately.
        int ret = epoll_wait(epoll_fd, events, NUM_POLL_EVENTS, NO_SLEEP);
        if (ret < 0) {
                U_LOG_E("epoll_wait failed with '%i'.", ret);
                ipc_server_handle_failure(vs);
                return;
        }

        for (int i = 0; i < ret; i++) {
                // Somebody new at the door.
                if (events[i].data.fd == ml->pipe_read) {
                        handle_listen(vs, ml);
                }
        }
}

int
ipc_server_mainloop_init(struct ipc_server_mainloop *ml, bool no_stdin)
{
        int ret = init_pipe(ml);
        if (ret < 0) {
                ipc_server_mainloop_deinit(ml);
                return ret;
        }

        ret = init_epoll(ml);
        if (ret < 0) {
                ipc_server_mainloop_deinit(ml);
                return ret;
        }

        return 0;
}

void
ipc_server_mainloop_deinit(struct ipc_server_mainloop *ml)
{
        if (ml == NULL) {
                return;
        }
        if (ml->pipe_read > 0) {
                // Close pipe on exit
                close(ml->pipe_read);
                ml->pipe_read = -1;
        }
        //! @todo close pipe_write or epoll_fd?

        // Tell everybody we're done and they should go away.
        pthread_mutex_lock(&ml->accept_mutex);
        while (ml->last_accepted_fd != 0) {
                // Don't accidentally intervene in somebody else's message,
                // wait until there are no unblocks pending.
                pthread_cond_wait(&ml->accept_cond, &ml->accept_mutex);
        }
        ml->last_accepted_fd = SHUTTING_DOWN;
        pthread_cond_broadcast(&ml->accept_cond);
        pthread_mutex_unlock(&ml->accept_mutex);
}

int
ipc_server_mainloop_add_fd(struct ipc_server *vs, struct ipc_server_mainloop *ml, int newfd)
{
        // Take the client push lock here, serializing clients attempting to connect.
        // This one won't be unlocked when waiting on the condition variable, ensuring we keep other clients out.
        pthread_mutex_lock(&ml->client_push_mutex);

        // Take the lock here, so we don't accidentally miss our fd being accepted.
        pthread_mutex_lock(&ml->accept_mutex);

        // Write our fd number: the other side of the pipe is in the same process, so passing just the number is OK.
        int ret = write(ml->pipe_write, &newfd, sizeof(newfd));
        if (ret < 0) {
                U_LOG_E("write to pipe failed with '%i'.", ret);
                goto exit;
        }

        // Normal looping on the condition variable's condition.
        while (ml->last_accepted_fd != newfd && ml->last_accepted_fd != SHUTTING_DOWN) {
                ret = pthread_cond_wait(&ml->accept_cond, &ml->accept_mutex);
                if (ret < 0) {
                        U_LOG_E("pthread_cond_wait failed with '%i'.", ret);
                        goto exit;
                }
        }
        if (ml->last_accepted_fd == SHUTTING_DOWN) {
                // We actually didn't hand off our client, we should error out.
                U_LOG_W("server was shutting down.");
                ret = -1;
        } else {
                // OK, we have now been accepted. Zero out the last accepted fd.
                ml->last_accepted_fd = 0;
                ret = 0;
        }
exit:
        pthread_mutex_unlock(&ml->accept_mutex);
        pthread_mutex_unlock(&ml->client_push_mutex);
        return ret;
}
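/*
 * Illustrative usage sketch, not part of the file above. It shows how a
 * thread that has just obtained a client connection fd (for example one
 * received over Binder in the same process, which is why only the integer
 * needs to cross into the mainloop) could hand it over through
 * ipc_server_mainloop_add_fd(). The function name push_client_fd, the
 * close-on-failure ownership policy, and the assumption that the
 * declarations come from the two server headers included above are
 * illustrative assumptions, not something this file defines.
 */
#include <unistd.h>

#include "server/ipc_server.h"
#include "server/ipc_server_mainloop_android.h"

static int
push_client_fd(struct ipc_server *vs, struct ipc_server_mainloop *ml, int client_fd)
{
        /*
         * Blocks until handle_listen() on the mainloop thread has read the
         * fd from the pipe and acknowledged it via accept_cond, or until
         * the mainloop marks itself as SHUTTING_DOWN.
         */
        int ret = ipc_server_mainloop_add_fd(vs, ml, client_fd);
        if (ret < 0) {
                // Assumed policy: the mainloop never took over the
                // connection, so the caller cleans up the fd itself.
                close(client_fd);
        }
        return ret;
}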