Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftest/bpf: Add test for bpf_getsockopt()

This patch removes __bpf_getsockopt(), which read the sk fields
directly through a PTR_TO_BTF_ID pointer. Instead, the test now
directly uses the kernel bpf helper bpf_getsockopt(), which now
supports all the required optnames.

TCP_SAVE[D]_SYN and TCP_MAXSEG are not tested in a loop across all
the hooks and sock_ops callbacks. TCP_SAVE[D]_SYN only works for
passive connections. TCP_MAXSEG can only be set with setsockopt
before the connection is established, and its getsockopt return
value can only be checked after the connection is established.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20220902002937.2896904-1-kafai@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Martin KaFai Lau and committed by
Alexei Starovoitov
f649f992 38566ec0

+43 -106
+1
tools/testing/selftests/bpf/progs/bpf_tracing_net.h
··· 38 38 #define TCP_USER_TIMEOUT 18 39 39 #define TCP_NOTSENT_LOWAT 25 40 40 #define TCP_SAVE_SYN 27 41 + #define TCP_SAVED_SYN 28 41 42 #define TCP_CA_NAME_MAX 16 42 43 #define TCP_NAGLE_OFF 1 43 44
+42 -106
tools/testing/selftests/bpf/progs/setget_sockopt.c
··· 52 52 53 53 static const struct sockopt_test sol_tcp_tests[] = { 54 54 { .opt = TCP_NODELAY, .flip = 1, }, 55 - { .opt = TCP_MAXSEG, .new = 1314, .expected = 1314, }, 56 55 { .opt = TCP_KEEPIDLE, .new = 123, .expected = 123, .restore = 321, }, 57 56 { .opt = TCP_KEEPINTVL, .new = 123, .expected = 123, .restore = 321, }, 58 57 { .opt = TCP_KEEPCNT, .new = 123, .expected = 123, .restore = 124, }, ··· 61 62 { .opt = TCP_THIN_LINEAR_TIMEOUTS, .flip = 1, }, 62 63 { .opt = TCP_USER_TIMEOUT, .new = 123400, .expected = 123400, }, 63 64 { .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, }, 64 - { .opt = TCP_SAVE_SYN, .new = 1, .expected = 1, }, 65 65 { .opt = 0, }, 66 66 }; 67 67 ··· 80 82 struct sock *sk; 81 83 }; 82 84 83 - static int __bpf_getsockopt(void *ctx, struct sock *sk, 84 - int level, int opt, int *optval, 85 - int optlen) 86 - { 87 - if (level == SOL_SOCKET) { 88 - switch (opt) { 89 - case SO_REUSEADDR: 90 - *optval = !!BPF_CORE_READ_BITFIELD(sk, sk_reuse); 91 - break; 92 - case SO_KEEPALIVE: 93 - *optval = !!(sk->sk_flags & (1UL << 3)); 94 - break; 95 - case SO_RCVLOWAT: 96 - *optval = sk->sk_rcvlowat; 97 - break; 98 - case SO_MAX_PACING_RATE: 99 - *optval = sk->sk_max_pacing_rate; 100 - break; 101 - default: 102 - return bpf_getsockopt(ctx, level, opt, optval, optlen); 103 - } 104 - return 0; 105 - } 106 - 107 - if (level == IPPROTO_TCP) { 108 - struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk); 109 - 110 - if (!tp) 111 - return -1; 112 - 113 - switch (opt) { 114 - case TCP_NODELAY: 115 - *optval = !!(BPF_CORE_READ_BITFIELD(tp, nonagle) & TCP_NAGLE_OFF); 116 - break; 117 - case TCP_MAXSEG: 118 - *optval = tp->rx_opt.user_mss; 119 - break; 120 - case TCP_KEEPIDLE: 121 - *optval = tp->keepalive_time / CONFIG_HZ; 122 - break; 123 - case TCP_SYNCNT: 124 - *optval = tp->inet_conn.icsk_syn_retries; 125 - break; 126 - case TCP_KEEPINTVL: 127 - *optval = tp->keepalive_intvl / CONFIG_HZ; 128 - break; 129 - case TCP_KEEPCNT: 130 - *optval = 
tp->keepalive_probes; 131 - break; 132 - case TCP_WINDOW_CLAMP: 133 - *optval = tp->window_clamp; 134 - break; 135 - case TCP_THIN_LINEAR_TIMEOUTS: 136 - *optval = !!BPF_CORE_READ_BITFIELD(tp, thin_lto); 137 - break; 138 - case TCP_USER_TIMEOUT: 139 - *optval = tp->inet_conn.icsk_user_timeout; 140 - break; 141 - case TCP_NOTSENT_LOWAT: 142 - *optval = tp->notsent_lowat; 143 - break; 144 - case TCP_SAVE_SYN: 145 - *optval = BPF_CORE_READ_BITFIELD(tp, save_syn); 146 - break; 147 - default: 148 - return bpf_getsockopt(ctx, level, opt, optval, optlen); 149 - } 150 - return 0; 151 - } 152 - 153 - if (level == IPPROTO_IPV6) { 154 - switch (opt) { 155 - case IPV6_AUTOFLOWLABEL: { 156 - __u16 proto = sk->sk_protocol; 157 - struct inet_sock *inet_sk; 158 - 159 - if (proto == IPPROTO_TCP) 160 - inet_sk = (struct inet_sock *)bpf_skc_to_tcp_sock(sk); 161 - else 162 - inet_sk = (struct inet_sock *)bpf_skc_to_udp6_sock(sk); 163 - 164 - if (!inet_sk) 165 - return -1; 166 - 167 - *optval = !!inet_sk->pinet6->autoflowlabel; 168 - break; 169 - } 170 - default: 171 - return bpf_getsockopt(ctx, level, opt, optval, optlen); 172 - } 173 - return 0; 174 - } 175 - 176 - return bpf_getsockopt(ctx, level, opt, optval, optlen); 177 - } 178 - 179 85 static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, 180 86 const struct sockopt_test *t, 181 87 int level) ··· 88 186 89 187 opt = t->opt; 90 188 91 - if (__bpf_getsockopt(ctx, sk, level, opt, &old, sizeof(old))) 189 + if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old))) 92 190 return 1; 93 191 /* kernel initialized txrehash to 255 */ 94 192 if (level == SOL_SOCKET && opt == SO_TXREHASH && old != 0 && old != 1) ··· 97 195 new = !old; 98 196 if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new))) 99 197 return 1; 100 - if (__bpf_getsockopt(ctx, sk, level, opt, &tmp, sizeof(tmp)) || 198 + if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) || 101 199 tmp != new) 102 200 return 1; 103 201 ··· 120 218 else 121 219 expected = 
t->expected; 122 220 123 - if (__bpf_getsockopt(ctx, sk, level, opt, &old, sizeof(old)) || 221 + if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old)) || 124 222 old == new) 125 223 return 1; 126 224 127 225 if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new))) 128 226 return 1; 129 - if (__bpf_getsockopt(ctx, sk, level, opt, &tmp, sizeof(tmp)) || 227 + if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) || 130 228 tmp != expected) 131 229 return 1; 132 230 ··· 312 410 return 0; 313 411 } 314 412 413 + static int test_tcp_maxseg(void *ctx, struct sock *sk) 414 + { 415 + int val = 1314, tmp; 416 + 417 + if (sk->sk_state != TCP_ESTABLISHED) 418 + return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG, 419 + &val, sizeof(val)); 420 + 421 + if (bpf_getsockopt(ctx, IPPROTO_TCP, TCP_MAXSEG, &tmp, sizeof(tmp)) || 422 + tmp > val) 423 + return -1; 424 + 425 + return 0; 426 + } 427 + 428 + static int test_tcp_saved_syn(void *ctx, struct sock *sk) 429 + { 430 + __u8 saved_syn[20]; 431 + int one = 1; 432 + 433 + if (sk->sk_state == TCP_LISTEN) 434 + return bpf_setsockopt(ctx, IPPROTO_TCP, TCP_SAVE_SYN, 435 + &one, sizeof(one)); 436 + 437 + return bpf_getsockopt(ctx, IPPROTO_TCP, TCP_SAVED_SYN, 438 + saved_syn, sizeof(saved_syn)); 439 + } 440 + 315 441 SEC("lsm_cgroup/socket_post_create") 316 442 int BPF_PROG(socket_post_create, struct socket *sock, int family, 317 443 int type, int protocol, int kern) ··· 370 440 371 441 switch (skops->op) { 372 442 case BPF_SOCK_OPS_TCP_LISTEN_CB: 373 - nr_listen += !bpf_test_sockopt(skops, sk); 443 + nr_listen += !(bpf_test_sockopt(skops, sk) || 444 + test_tcp_maxseg(skops, sk) || 445 + test_tcp_saved_syn(skops, sk)); 374 446 break; 375 447 case BPF_SOCK_OPS_TCP_CONNECT_CB: 376 - nr_connect += !bpf_test_sockopt(skops, sk); 448 + nr_connect += !(bpf_test_sockopt(skops, sk) || 449 + test_tcp_maxseg(skops, sk)); 377 450 break; 378 451 case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: 379 - nr_active += !bpf_test_sockopt(skops, sk); 452 + nr_active += 
!(bpf_test_sockopt(skops, sk) || 453 + test_tcp_maxseg(skops, sk)); 380 454 break; 381 455 case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: 382 - nr_passive += !bpf_test_sockopt(skops, sk); 456 + nr_passive += !(bpf_test_sockopt(skops, sk) || 457 + test_tcp_maxseg(skops, sk) || 458 + test_tcp_saved_syn(skops, sk)); 383 459 break; 384 460 } 385 461