/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_VSOCK_H
#define _LINUX_VIRTIO_VSOCK_H

#include <uapi/linux/virtio_vsock.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))

struct virtio_vsock_skb_cb {
	bool reply;
	bool tap_delivered;
	u32 offset;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))

static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
	return (struct virtio_vsock_hdr *)skb->head;
}

static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
}

static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
}

static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
}

static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
}

static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}

static inline void virtio_vsock_skb_put(struct sk_buff *skb, u32 len)
{
	DEBUG_NET_WARN_ON_ONCE(skb->len);

	if (skb_is_nonlinear(skb))
		skb->len = len;
	else
		skb_put(skb, len);
}

static inline struct sk_buff *
__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
				    unsigned int data_len,
				    gfp_t mask)
{
	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(header_len, data_len,
				   PAGE_ALLOC_COSTLY_ORDER, &err, mask);
	if (!skb)
		return NULL;

	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
	skb->data_len = data_len;
	return skb;
}

static inline struct sk_buff *
virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
{
	return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
}

static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
	if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		return virtio_vsock_alloc_linear_skb(size, mask);

	size -= VIRTIO_VSOCK_SKB_HEADROOM;
	return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
						   size, mask);
}

static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_head(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline void
virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_tail(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	spin_lock_bh(&list->lock);
	skb = __skb_dequeue(list);
	spin_unlock_bh(&list->lock);

	return skb;
}

static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
{
	spin_lock_bh(&list->lock);
	__skb_queue_purge(list);
	spin_unlock_bh(&list->lock);
}

static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
{
	return (size_t)(skb_end_pointer(skb) - skb->head);
}
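
/*
 * Illustrative sketch only, not part of this header: one way the allocation,
 * header and queueing helpers above could be combined when building a packet.
 * The function name, the VIRTIO_VSOCK_TYPE_STREAM/OP_RW choice and the idea
 * that the caller copies the payload in afterwards are assumptions made for
 * the example.
 */
static inline struct sk_buff *
virtio_vsock_example_build_pkt(struct sk_buff_head *queue, u32 payload_len,
			       gfp_t gfp)
{
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;

	/* The requested size includes the header room the allocator reserves. */
	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM + payload_len, gfp);
	if (!skb)
		return NULL;

	/* The packet header lives at skb->head, in front of the payload. */
	hdr = virtio_vsock_hdr(skb);
	hdr->type = cpu_to_le16(VIRTIO_VSOCK_TYPE_STREAM);
	hdr->op   = cpu_to_le16(VIRTIO_VSOCK_OP_RW);
	hdr->len  = cpu_to_le32(payload_len);

	/* Account for the payload the caller is expected to copy in. */
	virtio_vsock_skb_put(skb, payload_len);

	/* Append under the queue lock; usable from process or BH context. */
	virtio_vsock_skb_queue_tail(queue, skb);
	return skb;
}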
/* Dimension the RX SKB so that the entire thing fits exactly into
 * a single 4KiB page. This avoids wasting memory due to alloc_skb()
 * rounding up to the next page order and also means that we
 * don't leave higher-order pages sitting around in the RX queue.
 */
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	SKB_WITH_OVERHEAD(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)

enum {
	VSOCK_VQ_RX	= 0, /* for host to guest data */
	VSOCK_VQ_TX	= 1, /* for guest to host data */
	VSOCK_VQ_EVENT	= 2,
	VSOCK_VQ_MAX	= 3,
};

/* Per-socket state (accessed via vsk->trans) */
struct virtio_vsock_sock {
	struct vsock_sock *vsk;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	/* Protected by tx_lock */
	u32 tx_cnt;
	u32 peer_fwd_cnt;
	u32 peer_buf_alloc;
	size_t bytes_unsent;

	/* Protected by rx_lock */
	u32 fwd_cnt;
	u32 last_fwd_cnt;
	u32 rx_bytes;
	u32 buf_alloc;
	u32 buf_used;
	struct sk_buff_head rx_queue;
	u32 msg_count;
};
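
/*
 * Illustrative sketch only, not part of this header: how the tx_lock-protected
 * fields above combine into the sender's view of the peer's receive credit.
 * The peer advertises peer_buf_alloc as its total buffer space and
 * peer_fwd_cnt as the running count of bytes it has already freed from its
 * receive buffer, so bytes still "in flight" are tx_cnt - peer_fwd_cnt.
 * The function name is hypothetical.
 */
static inline u32 virtio_vsock_example_tx_space(struct virtio_vsock_sock *vvs)
{
	u32 space;

	spin_lock_bh(&vvs->tx_lock);
	space = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
	spin_unlock_bh(&vvs->tx_lock);

	return space;
}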
struct virtio_vsock_pkt_info {
	u32 remote_cid, remote_port;
	struct vsock_sock *vsk;
	struct msghdr *msg;
	u32 pkt_len;
	u16 type;
	u16 op;
	u32 flags;
	bool reply;
};

struct virtio_transport {
	/* This must be the first field */
	struct vsock_transport transport;

	/* Takes ownership of the packet */
	int (*send_pkt)(struct sk_buff *skb);

	/* Used in MSG_ZEROCOPY mode. Checks that the provided data
	 * (number of buffers) can be transmitted in zerocopy mode.
	 * If this callback is not implemented for the current
	 * transport, the transport doesn't need extra checks and can
	 * perform zerocopy transmission by default.
	 */
	bool (*can_msgzerocopy)(int bufs_num);
};

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len,
				int type);
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len);
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);

ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk);

void virtio_transport_consume_skb_sent(struct sk_buff *skb,
				       bool consume);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now);
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_available_now);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data);
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
bool virtio_transport_stream_allow(u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr);
bool virtio_transport_dgram_allow(u32 cid, u32 port);

int virtio_transport_connect(struct vsock_sock *vsk);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);

void virtio_transport_release(struct vsock_sock *vsk);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len);
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t len);

void virtio_transport_destruct(struct vsock_sock *vsk);

void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val);
#endif /* _LINUX_VIRTIO_VSOCK_H */
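
/*
 * Illustrative sketch only, not part of this header: the overall shape of a
 * transport backend built on the common helpers declared above, loosely
 * modelled on the existing in-tree transports.  Such code would live in the
 * backend's own .c file; the "example_" names and the trivial send_pkt stub
 * are hypothetical, and most callbacks are elided for brevity.  A real
 * backend would then register &example_transport.transport with
 * vsock_core_register().
 */
static int example_send_pkt(struct sk_buff *skb)
{
	/* send_pkt() takes ownership; a real backend would queue the skb
	 * for transmission instead of freeing it.
	 */
	kfree_skb(skb);
	return 0;
}

static u32 example_get_local_cid(void)
{
	return VMADDR_CID_HOST;	/* placeholder CID for the sketch */
}

static struct virtio_transport example_transport = {
	.transport = {
		.get_local_cid		= example_get_local_cid,

		.init			= virtio_transport_do_socket_init,
		.destruct		= virtio_transport_destruct,
		.release		= virtio_transport_release,
		.connect		= virtio_transport_connect,
		.shutdown		= virtio_transport_shutdown,

		.stream_enqueue		= virtio_transport_stream_enqueue,
		.stream_dequeue		= virtio_transport_stream_dequeue,
		.stream_has_data	= virtio_transport_stream_has_data,
		.stream_has_space	= virtio_transport_stream_has_space,
		.stream_rcvhiwat	= virtio_transport_stream_rcvhiwat,
		.stream_is_active	= virtio_transport_stream_is_active,
		.stream_allow		= virtio_transport_stream_allow,

		/* notify_*, seqpacket_* and dgram_* callbacks elided */
	},
	.send_pkt = example_send_pkt,
};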