at v5.7-rc4 11 kB view raw
1/* SPDX-License-Identifier: GPL-2.0 */ 2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ 3 4#ifndef _LINUX_SKMSG_H 5#define _LINUX_SKMSG_H 6 7#include <linux/bpf.h> 8#include <linux/filter.h> 9#include <linux/scatterlist.h> 10#include <linux/skbuff.h> 11 12#include <net/sock.h> 13#include <net/tcp.h> 14#include <net/strparser.h> 15 16#define MAX_MSG_FRAGS MAX_SKB_FRAGS 17#define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1) 18 19enum __sk_action { 20 __SK_DROP = 0, 21 __SK_PASS, 22 __SK_REDIRECT, 23 __SK_NONE, 24}; 25 26struct sk_msg_sg { 27 u32 start; 28 u32 curr; 29 u32 end; 30 u32 size; 31 u32 copybreak; 32 unsigned long copy; 33 /* The extra two elements: 34 * 1) used for chaining the front and sections when the list becomes 35 * partitioned (e.g. end < start). The crypto APIs require the 36 * chaining; 37 * 2) to chain tailer SG entries after the message. 38 */ 39 struct scatterlist data[MAX_MSG_FRAGS + 2]; 40}; 41static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS); 42 43/* UAPI in filter.c depends on struct sk_msg_sg being first element. */ 44struct sk_msg { 45 struct sk_msg_sg sg; 46 void *data; 47 void *data_end; 48 u32 apply_bytes; 49 u32 cork_bytes; 50 u32 flags; 51 struct sk_buff *skb; 52 struct sock *sk_redir; 53 struct sock *sk; 54 struct list_head list; 55}; 56 57struct sk_psock_progs { 58 struct bpf_prog *msg_parser; 59 struct bpf_prog *skb_parser; 60 struct bpf_prog *skb_verdict; 61}; 62 63enum sk_psock_state_bits { 64 SK_PSOCK_TX_ENABLED, 65}; 66 67struct sk_psock_link { 68 struct list_head list; 69 struct bpf_map *map; 70 void *link_raw; 71}; 72 73struct sk_psock_parser { 74 struct strparser strp; 75 bool enabled; 76 void (*saved_data_ready)(struct sock *sk); 77}; 78 79struct sk_psock_work_state { 80 struct sk_buff *skb; 81 u32 len; 82 u32 off; 83}; 84 85struct sk_psock { 86 struct sock *sk; 87 struct sock *sk_redir; 88 u32 apply_bytes; 89 u32 cork_bytes; 90 u32 eval; 91 struct sk_msg *cork; 92 struct sk_psock_progs progs; 93 struct sk_psock_parser parser; 94 struct sk_buff_head ingress_skb; 95 struct list_head ingress_msg; 96 unsigned long state; 97 struct list_head link; 98 spinlock_t link_lock; 99 refcount_t refcnt; 100 void (*saved_unhash)(struct sock *sk); 101 void (*saved_close)(struct sock *sk, long timeout); 102 void (*saved_write_space)(struct sock *sk); 103 struct proto *sk_proto; 104 struct sk_psock_work_state work_state; 105 struct work_struct work; 106 union { 107 struct rcu_head rcu; 108 struct work_struct gc; 109 }; 110}; 111 112int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, 113 int elem_first_coalesce); 114int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src, 115 u32 off, u32 len); 116void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len); 117int sk_msg_free(struct sock *sk, struct sk_msg *msg); 118int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg); 119void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes); 120void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg, 121 u32 bytes); 122 123void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes); 124void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes); 125 126int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, 127 struct sk_msg *msg, u32 bytes); 128int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, 129 struct sk_msg *msg, u32 bytes); 130 131static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) 132{ 133 WARN_ON(i == msg->sg.end && bytes); 134} 135 136static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) 137{ 138 if (psock->apply_bytes) { 139 if (psock->apply_bytes < bytes) 140 psock->apply_bytes = 0; 141 else 142 psock->apply_bytes -= bytes; 143 } 144} 145 146static inline u32 sk_msg_iter_dist(u32 start, u32 end) 147{ 148 return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start); 149} 150 151#define sk_msg_iter_var_prev(var) \ 152 do { \ 153 if (var == 0) \ 154 var = NR_MSG_FRAG_IDS - 1; \ 155 else \ 156 var--; \ 157 } while (0) 158 159#define sk_msg_iter_var_next(var) \ 160 do { \ 161 var++; \ 162 if (var == NR_MSG_FRAG_IDS) \ 163 var = 0; \ 164 } while (0) 165 166#define sk_msg_iter_prev(msg, which) \ 167 sk_msg_iter_var_prev(msg->sg.which) 168 169#define sk_msg_iter_next(msg, which) \ 170 sk_msg_iter_var_next(msg->sg.which) 171 172static inline void sk_msg_clear_meta(struct sk_msg *msg) 173{ 174 memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy)); 175} 176 177static inline void sk_msg_init(struct sk_msg *msg) 178{ 179 BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); 180 memset(msg, 0, sizeof(*msg)); 181 sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); 182} 183 184static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, 185 int which, u32 size) 186{ 187 dst->sg.data[which] = src->sg.data[which]; 188 dst->sg.data[which].length = size; 189 dst->sg.size += size; 190 src->sg.data[which].length -= size; 191 src->sg.data[which].offset += size; 192} 193 194static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src) 195{ 196 memcpy(dst, src, sizeof(*src)); 197 sk_msg_init(src); 198} 199 200static inline bool sk_msg_full(const struct sk_msg *msg) 201{ 202 return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS; 203} 204 205static inline u32 sk_msg_elem_used(const struct sk_msg *msg) 206{ 207 return sk_msg_iter_dist(msg->sg.start, msg->sg.end); 208} 209 210static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) 211{ 212 return &msg->sg.data[which]; 213} 214 215static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which) 216{ 217 return msg->sg.data[which]; 218} 219 220static inline struct page *sk_msg_page(struct sk_msg *msg, int which) 221{ 222 return sg_page(sk_msg_elem(msg, which)); 223} 224 225static inline bool sk_msg_to_ingress(const struct sk_msg *msg) 226{ 227 return msg->flags & BPF_F_INGRESS; 228} 229 230static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) 231{ 232 struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); 233 234 if (test_bit(msg->sg.start, &msg->sg.copy)) { 235 msg->data = NULL; 236 msg->data_end = NULL; 237 } else { 238 msg->data = sg_virt(sge); 239 msg->data_end = msg->data + sge->length; 240 } 241} 242 243static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, 244 u32 len, u32 offset) 245{ 246 struct scatterlist *sge; 247 248 get_page(page); 249 sge = sk_msg_elem(msg, msg->sg.end); 250 sg_set_page(sge, page, len, offset); 251 sg_unmark_end(sge); 252 253 __set_bit(msg->sg.end, &msg->sg.copy); 254 msg->sg.size += len; 255 sk_msg_iter_next(msg, end); 256} 257 258static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state) 259{ 260 do { 261 if (copy_state) 262 __set_bit(i, &msg->sg.copy); 263 else 264 __clear_bit(i, &msg->sg.copy); 265 sk_msg_iter_var_next(i); 266 if (i == msg->sg.end) 267 break; 268 } while (1); 269} 270 271static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start) 272{ 273 sk_msg_sg_copy(msg, start, true); 274} 275 276static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start) 277{ 278 sk_msg_sg_copy(msg, start, false); 279} 280 281static inline struct sk_psock *sk_psock(const struct sock *sk) 282{ 283 return rcu_dereference_sk_user_data(sk); 284} 285 286static inline void sk_psock_queue_msg(struct sk_psock *psock, 287 struct sk_msg *msg) 288{ 289 list_add_tail(&msg->list, &psock->ingress_msg); 290} 291 292static inline bool sk_psock_queue_empty(const struct sk_psock *psock) 293{ 294 return psock ? list_empty(&psock->ingress_msg) : true; 295} 296 297static inline void sk_psock_report_error(struct sk_psock *psock, int err) 298{ 299 struct sock *sk = psock->sk; 300 301 sk->sk_err = err; 302 sk->sk_error_report(sk); 303} 304 305struct sk_psock *sk_psock_init(struct sock *sk, int node); 306 307int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock); 308void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock); 309void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock); 310 311int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, 312 struct sk_msg *msg); 313 314static inline struct sk_psock_link *sk_psock_init_link(void) 315{ 316 return kzalloc(sizeof(struct sk_psock_link), 317 GFP_ATOMIC | __GFP_NOWARN); 318} 319 320static inline void sk_psock_free_link(struct sk_psock_link *link) 321{ 322 kfree(link); 323} 324 325struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); 326 327void __sk_psock_purge_ingress_msg(struct sk_psock *psock); 328 329static inline void sk_psock_cork_free(struct sk_psock *psock) 330{ 331 if (psock->cork) { 332 sk_msg_free(psock->sk, psock->cork); 333 kfree(psock->cork); 334 psock->cork = NULL; 335 } 336} 337 338static inline void sk_psock_update_proto(struct sock *sk, 339 struct sk_psock *psock, 340 struct proto *ops) 341{ 342 /* Initialize saved callbacks and original proto only once, since this 343 * function may be called multiple times for a psock, e.g. when 344 * psock->progs.msg_parser is updated. 345 * 346 * Since we've not installed the new proto, psock is not yet in use and 347 * we can initialize it without synchronization. 348 */ 349 if (!psock->sk_proto) { 350 struct proto *orig = READ_ONCE(sk->sk_prot); 351 352 psock->saved_unhash = orig->unhash; 353 psock->saved_close = orig->close; 354 psock->saved_write_space = sk->sk_write_space; 355 356 psock->sk_proto = orig; 357 } 358 359 /* Pairs with lockless read in sk_clone_lock() */ 360 WRITE_ONCE(sk->sk_prot, ops); 361} 362 363static inline void sk_psock_restore_proto(struct sock *sk, 364 struct sk_psock *psock) 365{ 366 sk->sk_prot->unhash = psock->saved_unhash; 367 if (inet_csk_has_ulp(sk)) { 368 tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); 369 } else { 370 sk->sk_write_space = psock->saved_write_space; 371 /* Pairs with lockless read in sk_clone_lock() */ 372 WRITE_ONCE(sk->sk_prot, psock->sk_proto); 373 } 374} 375 376static inline void sk_psock_set_state(struct sk_psock *psock, 377 enum sk_psock_state_bits bit) 378{ 379 set_bit(bit, &psock->state); 380} 381 382static inline void sk_psock_clear_state(struct sk_psock *psock, 383 enum sk_psock_state_bits bit) 384{ 385 clear_bit(bit, &psock->state); 386} 387 388static inline bool sk_psock_test_state(const struct sk_psock *psock, 389 enum sk_psock_state_bits bit) 390{ 391 return test_bit(bit, &psock->state); 392} 393 394static inline struct sk_psock *sk_psock_get(struct sock *sk) 395{ 396 struct sk_psock *psock; 397 398 rcu_read_lock(); 399 psock = sk_psock(sk); 400 if (psock && !refcount_inc_not_zero(&psock->refcnt)) 401 psock = NULL; 402 rcu_read_unlock(); 403 return psock; 404} 405 406void sk_psock_stop(struct sock *sk, struct sk_psock *psock); 407void sk_psock_destroy(struct rcu_head *rcu); 408void sk_psock_drop(struct sock *sk, struct sk_psock *psock); 409 410static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) 411{ 412 if (refcount_dec_and_test(&psock->refcnt)) 413 sk_psock_drop(sk, psock); 414} 415 416static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) 417{ 418 if (psock->parser.enabled) 419 psock->parser.saved_data_ready(sk); 420 else 421 sk->sk_data_ready(sk); 422} 423 424static inline void psock_set_prog(struct bpf_prog **pprog, 425 struct bpf_prog *prog) 426{ 427 prog = xchg(pprog, prog); 428 if (prog) 429 bpf_prog_put(prog); 430} 431 432static inline void psock_progs_drop(struct sk_psock_progs *progs) 433{ 434 psock_set_prog(&progs->msg_parser, NULL); 435 psock_set_prog(&progs->skb_parser, NULL); 436 psock_set_prog(&progs->skb_verdict, NULL); 437} 438 439#endif /* _LINUX_SKMSG_H */