Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'selftests: xsk: various simplifications'

Magnus Karlsson says:

====================

This patch set mainly contains various simplifications to the xsk
selftests. The only exception is the introduction of packet streams
that describe what the Tx process should send and what the Rx process
should receive. If it receives anything else, the test fails. This
mechanism can be used to produce tests where all packets are not
received by the Rx thread or modified in some way. An example of this
is if an XDP program does XDP_PASS on some of the packets.

This patch set will be followed by another patch set that implements a
new structure that will facilitate adding new tests. A couple of new
tests will also be included in that patch set.

v2 -> v3:

* Reworked patch 12 so that it now has functions for creating and
destroying ifobjects. Simplifies the code. [Maciej]
* The packet stream now allocates the supplied buffer array length,
instead of the default one. [Maciej]
* pkt_stream_get_pkt() now returns NULL when indexing a non-existing
packet. [Maciej]
* pkt_validate() is now is_pkt_valid(). [Maciej]
* Slowed down packet sending speed even more in patch 11 so that slow
systems do not silently drop packets in skb mode.

v1 -> v2:

* Dropped the patch with per process limit changes as it is not needed
[Yonghong]
* Improved the commit message of patch 1 [Yonghong]
* Fixed a spelling error in patch 9

Thanks: Magnus
====================

Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+377 -449
+3 -7
tools/testing/selftests/bpf/test_xsk.sh
··· 63 63 # ---------------- 64 64 # Must run with CAP_NET_ADMIN capability. 65 65 # 66 - # Run (full color-coded output): 67 - # sudo ./test_xsk.sh -c 66 + # Run: 67 + # sudo ./test_xsk.sh 68 68 # 69 69 # If running from kselftests: 70 - # sudo make colorconsole=1 run_tests 71 - # 72 - # Run (full output without color-coding): 73 - # sudo ./test_xsk.sh 70 + # sudo make run_tests 74 71 # 75 72 # Run with verbose output: 76 73 # sudo ./test_xsk.sh -v ··· 80 83 while getopts "cvD" flag 81 84 do 82 85 case "${flag}" in 83 - c) colorconsole=1;; 84 86 v) verbose=1;; 85 87 D) dump_pkts=1;; 86 88 esac
+346 -377
tools/testing/selftests/bpf/xdpxceiver.c
··· 70 70 #include <errno.h> 71 71 #include <getopt.h> 72 72 #include <asm/barrier.h> 73 - typedef __u16 __sum16; 74 73 #include <linux/if_link.h> 75 74 #include <linux/if_ether.h> 76 75 #include <linux/ip.h> ··· 105 106 106 107 static void __exit_with_error(int error, const char *file, const char *func, int line) 107 108 { 108 - if (configured_mode == TEST_MODE_UNCONFIGURED) { 109 - ksft_exit_fail_msg 110 - ("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, strerror(error)); 111 - } else { 112 - ksft_test_result_fail 113 - ("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, strerror(error)); 114 - ksft_exit_xfail(); 115 - } 109 + ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, 110 + strerror(error)); 111 + ksft_exit_xfail(); 116 112 } 117 113 118 114 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) ··· 120 126 test_type == TEST_TYPE_STATS ? "Stats" : "",\ 121 127 test_type == TEST_TYPE_BPF_RES ? "BPF RES" : "")) 122 128 123 - static void *memset32_htonl(void *dest, u32 val, u32 size) 129 + static void memset32_htonl(void *dest, u32 val, u32 size) 124 130 { 125 131 u32 *ptr = (u32 *)dest; 126 132 int i; ··· 129 135 130 136 for (i = 0; i < (size & (~0x3)); i += 4) 131 137 ptr[i >> 2] = val; 132 - 133 - for (; i < size; i++) 134 - ((char *)dest)[i] = ((char *)&val)[i & 3]; 135 - 136 - return dest; 137 138 } 138 139 139 140 /* ··· 219 230 ip_hdr->check = 0; 220 231 } 221 232 222 - static void gen_udp_hdr(struct generic_data *data, struct ifobject *ifobject, 233 + static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject, 223 234 struct udphdr *udp_hdr) 224 235 { 225 236 udp_hdr->source = htons(ifobject->src_port); 226 237 udp_hdr->dest = htons(ifobject->dst_port); 227 238 udp_hdr->len = htons(UDP_PKT_SIZE); 228 - memset32_htonl(pkt_data + PKT_HDR_SIZE, htonl(data->seqnum), UDP_PKT_DATA_SIZE); 239 + memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE); 229 
240 } 230 241 231 242 static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr) ··· 235 246 udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr); 236 247 } 237 248 238 - static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr) 239 - { 240 - memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data, PKT_SIZE); 241 - } 242 - 243 - static void xsk_configure_umem(struct ifobject *data, void *buffer, int idx) 249 + static void xsk_configure_umem(struct ifobject *data, void *buffer, u64 size, int idx) 244 250 { 245 251 struct xsk_umem_config cfg = { 246 252 .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, ··· 244 260 .frame_headroom = frame_headroom, 245 261 .flags = XSK_UMEM__DEFAULT_FLAGS 246 262 }; 247 - int size = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE; 248 263 struct xsk_umem_info *umem; 249 264 int ret; 250 265 ··· 254 271 ret = xsk_umem__create(&umem->umem, buffer, size, 255 272 &umem->fq, &umem->cq, &cfg); 256 273 if (ret) 257 - exit_with_error(ret); 274 + exit_with_error(-ret); 258 275 259 276 umem->buffer = buffer; 260 277 ··· 268 285 269 286 ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx); 270 287 if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS) 271 - exit_with_error(ret); 288 + exit_with_error(-ret); 272 289 for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) 273 290 *xsk_ring_prod__fill_addr(&umem->fq, idx++) = i * XSK_UMEM__DEFAULT_FRAME_SIZE; 274 291 xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS); ··· 316 333 {"queue", optional_argument, 0, 'q'}, 317 334 {"dump-pkts", optional_argument, 0, 'D'}, 318 335 {"verbose", no_argument, 0, 'v'}, 319 - {"tx-pkt-count", optional_argument, 0, 'C'}, 320 336 {0, 0, 0, 0} 321 337 }; 322 338 323 339 static void usage(const char *prog) 324 340 { 325 341 const char *str = 326 - " Usage: %s [OPTIONS]\n" 327 - " Options:\n" 328 - " -i, --interface Use interface\n" 329 - " -q, --queue=n Use queue n (default 0)\n" 330 - " -D, 
--dump-pkts Dump packets L2 - L5\n" 331 - " -v, --verbose Verbose output\n" 332 - " -C, --tx-pkt-count=n Number of packets to send\n"; 342 + " Usage: %s [OPTIONS]\n" 343 + " Options:\n" 344 + " -i, --interface Use interface\n" 345 + " -q, --queue=n Use queue n (default 0)\n" 346 + " -D, --dump-pkts Dump packets L2 - L5\n" 347 + " -v, --verbose Verbose output\n"; 348 + 333 349 ksft_print_msg(str, prog); 334 350 } 335 351 ··· 374 392 opterr = 0; 375 393 376 394 for (;;) { 377 - c = getopt_long(argc, argv, "i:DC:v", long_options, &option_index); 395 + c = getopt_long(argc, argv, "i:Dv", long_options, &option_index); 378 396 379 397 if (c == -1) 380 398 break; ··· 395 413 interface_index++; 396 414 break; 397 415 case 'D': 398 - debug_pkt_dump = 1; 399 - break; 400 - case 'C': 401 - opt_pkt_count = atoi(optarg); 416 + opt_pkt_dump = true; 402 417 break; 403 418 case 'v': 404 - opt_verbose = 1; 419 + opt_verbose = true; 405 420 break; 406 421 default: 407 422 usage(basename(argv[0])); ··· 406 427 } 407 428 } 408 429 409 - if (!opt_pkt_count) { 410 - print_verbose("No tx-pkt-count specified, using default %u\n", DEFAULT_PKT_CNT); 411 - opt_pkt_count = DEFAULT_PKT_CNT; 412 - } 413 - 414 430 if (!validate_interfaces()) { 415 431 usage(basename(argv[0])); 416 432 ksft_exit_xfail(); 417 433 } 434 + } 435 + 436 + static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) 437 + { 438 + if (pkt_nb >= pkt_stream->nb_pkts) 439 + return NULL; 440 + 441 + return &pkt_stream->pkts[pkt_nb]; 442 + } 443 + 444 + static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len) 445 + { 446 + struct pkt_stream *pkt_stream; 447 + u32 i; 448 + 449 + pkt_stream = malloc(sizeof(*pkt_stream)); 450 + if (!pkt_stream) 451 + exit_with_error(ENOMEM); 452 + 453 + pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 454 + if (!pkt_stream->pkts) 455 + exit_with_error(ENOMEM); 456 + 457 + pkt_stream->nb_pkts = nb_pkts; 458 + for (i = 0; i < nb_pkts; i++) { 459 + 
pkt_stream->pkts[i].addr = (i % num_frames) * XSK_UMEM__DEFAULT_FRAME_SIZE; 460 + pkt_stream->pkts[i].len = pkt_len; 461 + pkt_stream->pkts[i].payload = i; 462 + } 463 + 464 + return pkt_stream; 465 + } 466 + 467 + static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) 468 + { 469 + struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb); 470 + struct udphdr *udp_hdr; 471 + struct ethhdr *eth_hdr; 472 + struct iphdr *ip_hdr; 473 + void *data; 474 + 475 + if (!pkt) 476 + return NULL; 477 + 478 + data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); 479 + udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr)); 480 + ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr)); 481 + eth_hdr = (struct ethhdr *)data; 482 + 483 + gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr); 484 + gen_ip_hdr(ifobject, ip_hdr); 485 + gen_udp_csum(udp_hdr, ip_hdr); 486 + gen_eth_hdr(ifobject, eth_hdr); 487 + 488 + return pkt; 489 + } 490 + 491 + static void pkt_dump(void *pkt, u32 len) 492 + { 493 + char s[INET_ADDRSTRLEN]; 494 + struct ethhdr *ethhdr; 495 + struct udphdr *udphdr; 496 + struct iphdr *iphdr; 497 + int payload, i; 498 + 499 + ethhdr = pkt; 500 + iphdr = pkt + sizeof(*ethhdr); 501 + udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr); 502 + 503 + /*extract L2 frame */ 504 + fprintf(stdout, "DEBUG>> L2: dst mac: "); 505 + for (i = 0; i < ETH_ALEN; i++) 506 + fprintf(stdout, "%02X", ethhdr->h_dest[i]); 507 + 508 + fprintf(stdout, "\nDEBUG>> L2: src mac: "); 509 + for (i = 0; i < ETH_ALEN; i++) 510 + fprintf(stdout, "%02X", ethhdr->h_source[i]); 511 + 512 + /*extract L3 frame */ 513 + fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl); 514 + fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n", 515 + inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s))); 516 + fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n", 517 + inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s))); 518 + /*extract L4 frame */ 519 + fprintf(stdout, 
"DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source)); 520 + fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest)); 521 + /*extract L5 frame */ 522 + payload = *((uint32_t *)(pkt + PKT_HDR_SIZE)); 523 + 524 + fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload); 525 + fprintf(stdout, "---------------------------------------\n"); 526 + } 527 + 528 + static bool is_pkt_valid(struct pkt *pkt, void *buffer, const struct xdp_desc *desc) 529 + { 530 + void *data = xsk_umem__get_data(buffer, desc->addr); 531 + struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr)); 532 + 533 + if (!pkt) { 534 + ksft_test_result_fail("ERROR: [%s] too many packets received\n", __func__); 535 + return false; 536 + } 537 + 538 + if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { 539 + u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); 540 + 541 + if (opt_pkt_dump && test_type != TEST_TYPE_STATS) 542 + pkt_dump(data, PKT_SIZE); 543 + 544 + if (pkt->len != desc->len) { 545 + ksft_test_result_fail 546 + ("ERROR: [%s] expected length [%d], got length [%d]\n", 547 + __func__, pkt->len, desc->len); 548 + return false; 549 + } 550 + 551 + if (pkt->payload != seqnum) { 552 + ksft_test_result_fail 553 + ("ERROR: [%s] expected seqnum [%d], got seqnum [%d]\n", 554 + __func__, pkt->payload, seqnum); 555 + return false; 556 + } 557 + } else { 558 + ksft_print_msg("Invalid frame received: "); 559 + ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version, 560 + iphdr->tos); 561 + return false; 562 + } 563 + 564 + return true; 418 565 } 419 566 420 567 static void kick_tx(struct xsk_socket_info *xsk) ··· 553 448 exit_with_error(errno); 554 449 } 555 450 556 - static void complete_tx_only(struct xsk_socket_info *xsk, int batch_size) 451 + static void complete_pkts(struct xsk_socket_info *xsk, int batch_size) 557 452 { 558 453 unsigned int rcvd; 559 454 u32 idx; ··· 568 463 if (rcvd) { 569 464 xsk_ring_cons__release(&xsk->umem->cq, rcvd); 570 
465 xsk->outstanding_tx -= rcvd; 571 - xsk->tx_npkts += rcvd; 572 466 } 573 467 } 574 468 575 - static void rx_pkt(struct xsk_socket_info *xsk, struct pollfd *fds) 469 + static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *xsk, 470 + struct pollfd *fds) 576 471 { 577 - unsigned int rcvd, i; 578 - u32 idx_rx = 0, idx_fq = 0; 472 + u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkt_count = 0; 473 + struct pkt *pkt; 579 474 int ret; 580 475 581 - rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 582 - if (!rcvd) { 583 - if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) { 584 - ret = poll(fds, 1, POLL_TMOUT); 585 - if (ret < 0) 586 - exit_with_error(ret); 476 + pkt = pkt_stream_get_pkt(pkt_stream, pkt_count++); 477 + while (pkt) { 478 + rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 479 + if (!rcvd) { 480 + if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) { 481 + ret = poll(fds, 1, POLL_TMOUT); 482 + if (ret < 0) 483 + exit_with_error(-ret); 484 + } 485 + continue; 587 486 } 588 - return; 589 - } 590 487 591 - ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); 592 - while (ret != rcvd) { 593 - if (ret < 0) 594 - exit_with_error(ret); 595 - if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) { 596 - ret = poll(fds, 1, POLL_TMOUT); 597 - if (ret < 0) 598 - exit_with_error(ret); 599 - } 600 488 ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); 601 - } 602 - 603 - for (i = 0; i < rcvd; i++) { 604 - u64 addr, orig; 605 - 606 - addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr; 607 - xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 608 - orig = xsk_umem__extract_addr(addr); 609 - 610 - addr = xsk_umem__add_offset_to_addr(addr); 611 - pkt_node_rx = malloc(sizeof(struct pkt) + PKT_SIZE); 612 - if (!pkt_node_rx) 613 - exit_with_error(errno); 614 - 615 - pkt_node_rx->pkt_frame = malloc(PKT_SIZE); 616 - if (!pkt_node_rx->pkt_frame) 617 - exit_with_error(errno); 618 - 619 - memcpy(pkt_node_rx->pkt_frame, 
xsk_umem__get_data(xsk->umem->buffer, addr), 620 - PKT_SIZE); 621 - 622 - TAILQ_INSERT_HEAD(&head, pkt_node_rx, pkt_nodes); 623 - 624 - *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig; 625 - } 626 - 627 - xsk_ring_prod__submit(&xsk->umem->fq, rcvd); 628 - xsk_ring_cons__release(&xsk->rx, rcvd); 629 - xsk->rx_npkts += rcvd; 630 - } 631 - 632 - static void tx_only(struct xsk_socket_info *xsk, u32 *frameptr, int batch_size) 633 - { 634 - u32 idx = 0; 635 - unsigned int i; 636 - bool tx_invalid_test = stat_test_type == STAT_TEST_TX_INVALID; 637 - u32 len = tx_invalid_test ? XSK_UMEM__DEFAULT_FRAME_SIZE + 1 : PKT_SIZE; 638 - 639 - while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) < batch_size) 640 - complete_tx_only(xsk, batch_size); 641 - 642 - for (i = 0; i < batch_size; i++) { 643 - struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); 644 - 645 - tx_desc->addr = (*frameptr + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT; 646 - tx_desc->len = len; 647 - } 648 - 649 - xsk_ring_prod__submit(&xsk->tx, batch_size); 650 - if (!tx_invalid_test) { 651 - xsk->outstanding_tx += batch_size; 652 - } else if (xsk_ring_prod__needs_wakeup(&xsk->tx)) { 653 - kick_tx(xsk); 654 - } 655 - *frameptr += batch_size; 656 - *frameptr %= num_frames; 657 - complete_tx_only(xsk, batch_size); 658 - } 659 - 660 - static int get_batch_size(int pkt_cnt) 661 - { 662 - if (!opt_pkt_count) 663 - return BATCH_SIZE; 664 - 665 - if (pkt_cnt + BATCH_SIZE <= opt_pkt_count) 666 - return BATCH_SIZE; 667 - 668 - return opt_pkt_count - pkt_cnt; 669 - } 670 - 671 - static void complete_tx_only_all(struct ifobject *ifobject) 672 - { 673 - bool pending; 674 - 675 - do { 676 - pending = false; 677 - if (ifobject->xsk->outstanding_tx) { 678 - complete_tx_only(ifobject->xsk, BATCH_SIZE); 679 - pending = !!ifobject->xsk->outstanding_tx; 489 + while (ret != rcvd) { 490 + if (ret < 0) 491 + exit_with_error(-ret); 492 + if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) { 493 + ret = poll(fds, 
1, POLL_TMOUT); 494 + if (ret < 0) 495 + exit_with_error(-ret); 496 + } 497 + ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); 680 498 } 681 - } while (pending); 499 + 500 + for (i = 0; i < rcvd; i++) { 501 + const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 502 + u64 addr = desc->addr, orig; 503 + 504 + orig = xsk_umem__extract_addr(addr); 505 + addr = xsk_umem__add_offset_to_addr(addr); 506 + if (!is_pkt_valid(pkt, xsk->umem->buffer, desc)) 507 + return; 508 + 509 + *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig; 510 + pkt = pkt_stream_get_pkt(pkt_stream, pkt_count++); 511 + } 512 + 513 + xsk_ring_prod__submit(&xsk->umem->fq, rcvd); 514 + xsk_ring_cons__release(&xsk->rx, rcvd); 515 + } 682 516 } 683 517 684 - static void tx_only_all(struct ifobject *ifobject) 518 + static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb) 519 + { 520 + struct xsk_socket_info *xsk = ifobject->xsk; 521 + u32 i, idx; 522 + 523 + while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) 524 + complete_pkts(xsk, BATCH_SIZE); 525 + 526 + for (i = 0; i < BATCH_SIZE; i++) { 527 + struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); 528 + struct pkt *pkt = pkt_generate(ifobject, pkt_nb); 529 + 530 + if (!pkt) 531 + break; 532 + 533 + tx_desc->addr = pkt->addr; 534 + tx_desc->len = pkt->len; 535 + pkt_nb++; 536 + } 537 + 538 + xsk_ring_prod__submit(&xsk->tx, i); 539 + if (stat_test_type != STAT_TEST_TX_INVALID) 540 + xsk->outstanding_tx += i; 541 + else if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 542 + kick_tx(xsk); 543 + complete_pkts(xsk, i); 544 + 545 + return i; 546 + } 547 + 548 + static void wait_for_tx_completion(struct xsk_socket_info *xsk) 549 + { 550 + while (xsk->outstanding_tx) 551 + complete_pkts(xsk, BATCH_SIZE); 552 + } 553 + 554 + static void send_pkts(struct ifobject *ifobject) 685 555 { 686 556 struct pollfd fds[MAX_SOCKS] = { }; 687 - u32 frame_nb = 0; 688 - int pkt_cnt = 0; 689 - int ret; 
557 + u32 pkt_cnt = 0; 690 558 691 559 fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk); 692 560 fds[0].events = POLLOUT; 693 561 694 - while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) { 695 - int batch_size = get_batch_size(pkt_cnt); 562 + while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { 563 + u32 sent; 696 564 697 565 if (test_type == TEST_TYPE_POLL) { 566 + int ret; 567 + 698 568 ret = poll(fds, 1, POLL_TMOUT); 699 569 if (ret <= 0) 700 570 continue; ··· 678 598 continue; 679 599 } 680 600 681 - tx_only(ifobject->xsk, &frame_nb, batch_size); 682 - pkt_cnt += batch_size; 601 + sent = __send_pkts(ifobject, pkt_cnt); 602 + pkt_cnt += sent; 603 + usleep(10); 683 604 } 684 605 685 - if (opt_pkt_count) 686 - complete_tx_only_all(ifobject); 606 + wait_for_tx_completion(ifobject->xsk); 687 607 } 688 608 689 - static void worker_pkt_dump(void) 609 + static bool rx_stats_are_valid(struct ifobject *ifobject) 690 610 { 691 - struct ethhdr *ethhdr; 692 - struct iphdr *iphdr; 693 - struct udphdr *udphdr; 694 - char s[128]; 695 - int payload; 696 - void *ptr; 697 - 698 - fprintf(stdout, "---------------------------------------\n"); 699 - for (int iter = 0; iter < num_frames - 1; iter++) { 700 - ptr = pkt_buf[iter]->payload; 701 - ethhdr = ptr; 702 - iphdr = ptr + sizeof(*ethhdr); 703 - udphdr = ptr + sizeof(*ethhdr) + sizeof(*iphdr); 704 - 705 - /*extract L2 frame */ 706 - fprintf(stdout, "DEBUG>> L2: dst mac: "); 707 - for (int i = 0; i < ETH_ALEN; i++) 708 - fprintf(stdout, "%02X", ethhdr->h_dest[i]); 709 - 710 - fprintf(stdout, "\nDEBUG>> L2: src mac: "); 711 - for (int i = 0; i < ETH_ALEN; i++) 712 - fprintf(stdout, "%02X", ethhdr->h_source[i]); 713 - 714 - /*extract L3 frame */ 715 - fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl); 716 - fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n", 717 - inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s))); 718 - fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n", 719 - inet_ntop(AF_INET, 
&iphdr->daddr, s, sizeof(s))); 720 - /*extract L4 frame */ 721 - fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source)); 722 - fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest)); 723 - /*extract L5 frame */ 724 - payload = *((uint32_t *)(ptr + PKT_HDR_SIZE)); 725 - 726 - if (payload == EOT) { 727 - print_verbose("End-of-transmission frame received\n"); 728 - fprintf(stdout, "---------------------------------------\n"); 729 - break; 730 - } 731 - fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload); 732 - fprintf(stdout, "---------------------------------------\n"); 733 - } 734 - } 735 - 736 - static void worker_stats_validate(struct ifobject *ifobject) 737 - { 611 + u32 xsk_stat = 0, expected_stat = ifobject->pkt_stream->nb_pkts; 612 + struct xsk_socket *xsk = ifobject->xsk->xsk; 613 + int fd = xsk_socket__fd(xsk); 738 614 struct xdp_statistics stats; 739 615 socklen_t optlen; 740 616 int err; 741 - struct xsk_socket *xsk = stat_test_type == STAT_TEST_TX_INVALID ? 
742 - ifdict[!ifobject->ifdict_index]->xsk->xsk : 743 - ifobject->xsk->xsk; 744 - int fd = xsk_socket__fd(xsk); 745 - unsigned long xsk_stat = 0, expected_stat = opt_pkt_count; 746 - 747 - sigvar = 0; 748 617 749 618 optlen = sizeof(stats); 750 619 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 751 - if (err) 752 - return; 620 + if (err) { 621 + ksft_test_result_fail("ERROR: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", 622 + __func__, -err, strerror(-err)); 623 + return true; 624 + } 753 625 754 626 if (optlen == sizeof(struct xdp_statistics)) { 755 627 switch (stat_test_type) { ··· 709 677 xsk_stat = stats.rx_dropped; 710 678 break; 711 679 case STAT_TEST_TX_INVALID: 712 - xsk_stat = stats.tx_invalid_descs; 713 - break; 680 + return true; 714 681 case STAT_TEST_RX_FULL: 715 682 xsk_stat = stats.rx_ring_full; 716 683 expected_stat -= RX_FULL_RXQSIZE; ··· 722 691 } 723 692 724 693 if (xsk_stat == expected_stat) 725 - sigvar = 1; 694 + return true; 726 695 } 696 + 697 + return false; 727 698 } 728 699 729 - static void worker_pkt_validate(void) 700 + static void tx_stats_validate(struct ifobject *ifobject) 730 701 { 731 - u32 payloadseqnum = -2; 732 - struct iphdr *iphdr; 702 + struct xsk_socket *xsk = ifobject->xsk->xsk; 703 + int fd = xsk_socket__fd(xsk); 704 + struct xdp_statistics stats; 705 + socklen_t optlen; 706 + int err; 733 707 734 - while (1) { 735 - pkt_node_rx_q = TAILQ_LAST(&head, head_s); 736 - if (!pkt_node_rx_q) 737 - break; 738 - 739 - iphdr = (struct iphdr *)(pkt_node_rx_q->pkt_frame + sizeof(struct ethhdr)); 740 - 741 - /*do not increment pktcounter if !(tos=0x9 and ipv4) */ 742 - if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { 743 - payloadseqnum = *((uint32_t *)(pkt_node_rx_q->pkt_frame + PKT_HDR_SIZE)); 744 - if (debug_pkt_dump && payloadseqnum != EOT) { 745 - pkt_obj = malloc(sizeof(*pkt_obj)); 746 - pkt_obj->payload = malloc(PKT_SIZE); 747 - memcpy(pkt_obj->payload, pkt_node_rx_q->pkt_frame, PKT_SIZE); 748 
- pkt_buf[payloadseqnum] = pkt_obj; 749 - } 750 - 751 - if (payloadseqnum == EOT) { 752 - print_verbose("End-of-transmission frame received: PASS\n"); 753 - sigvar = 1; 754 - break; 755 - } 756 - 757 - if (prev_pkt + 1 != payloadseqnum) { 758 - ksft_test_result_fail 759 - ("ERROR: [%s] prev_pkt [%d], payloadseqnum [%d]\n", 760 - __func__, prev_pkt, payloadseqnum); 761 - ksft_exit_xfail(); 762 - } 763 - 764 - prev_pkt = payloadseqnum; 765 - pkt_counter++; 766 - } else { 767 - ksft_print_msg("Invalid frame received: "); 768 - ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version, 769 - iphdr->tos); 770 - } 771 - 772 - TAILQ_REMOVE(&head, pkt_node_rx_q, pkt_nodes); 773 - free(pkt_node_rx_q->pkt_frame); 774 - free(pkt_node_rx_q); 775 - pkt_node_rx_q = NULL; 708 + optlen = sizeof(stats); 709 + err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 710 + if (err) { 711 + ksft_test_result_fail("ERROR: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", 712 + __func__, -err, strerror(-err)); 713 + return; 776 714 } 715 + 716 + if (stats.tx_invalid_descs == ifobject->pkt_stream->nb_pkts) 717 + return; 718 + 719 + ksft_test_result_fail("ERROR: [%s] tx_invalid_descs incorrect. 
Got [%u] expected [%u]\n", 720 + __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts); 777 721 } 778 722 779 723 static void thread_common_ops(struct ifobject *ifobject, void *bufs) 780 724 { 781 - int umem_sz = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE; 725 + u64 umem_sz = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE; 726 + int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 727 + size_t mmap_sz = umem_sz; 782 728 int ctr = 0; 783 729 int ret; 784 730 785 731 ifobject->ns_fd = switch_namespace(ifobject->nsname); 786 732 787 733 if (test_type == TEST_TYPE_BPF_RES) 788 - umem_sz *= 2; 734 + mmap_sz *= 2; 789 735 790 - bufs = mmap(NULL, umem_sz, 791 - PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 736 + bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 792 737 if (bufs == MAP_FAILED) 793 738 exit_with_error(errno); 794 739 795 - xsk_configure_umem(ifobject, bufs, 0); 796 - ifobject->umem = ifobject->umem_arr[0]; 797 - ret = xsk_configure_socket(ifobject, 0); 798 - 799 - /* Retry Create Socket if it fails as xsk_socket__create() 800 - * is asynchronous 801 - */ 802 - while (ret && ctr < SOCK_RECONF_CTR) { 803 - xsk_configure_umem(ifobject, bufs, 0); 740 + while (ctr++ < SOCK_RECONF_CTR) { 741 + xsk_configure_umem(ifobject, bufs, umem_sz, 0); 804 742 ifobject->umem = ifobject->umem_arr[0]; 805 743 ret = xsk_configure_socket(ifobject, 0); 806 - usleep(USLEEP_MAX); 807 - ctr++; 808 - } 744 + if (!ret) 745 + break; 809 746 810 - if (ctr >= SOCK_RECONF_CTR) 811 - exit_with_error(ret); 747 + /* Retry Create Socket if it fails as xsk_socket__create() is asynchronous */ 748 + usleep(USLEEP_MAX); 749 + if (ctr >= SOCK_RECONF_CTR) 750 + exit_with_error(-ret); 751 + } 812 752 813 753 ifobject->umem = ifobject->umem_arr[0]; 814 754 ifobject->xsk = ifobject->xsk_arr[0]; 815 755 816 756 if (test_type == TEST_TYPE_BPF_RES) { 817 - xsk_configure_umem(ifobject, (u8 *)bufs + (umem_sz / 2), 1); 757 + xsk_configure_umem(ifobject, (u8 
*)bufs + umem_sz, umem_sz, 1); 818 758 ifobject->umem = ifobject->umem_arr[1]; 819 759 ret = xsk_configure_socket(ifobject, 1); 820 760 } ··· 811 809 812 810 static void *worker_testapp_validate_tx(void *arg) 813 811 { 814 - struct udphdr *udp_hdr = 815 - (struct udphdr *)(pkt_data + sizeof(struct ethhdr) + sizeof(struct iphdr)); 816 - struct iphdr *ip_hdr = (struct iphdr *)(pkt_data + sizeof(struct ethhdr)); 817 - struct ethhdr *eth_hdr = (struct ethhdr *)pkt_data; 818 812 struct ifobject *ifobject = (struct ifobject *)arg; 819 - struct generic_data data; 820 813 void *bufs = NULL; 821 814 822 815 if (!second_step) 823 816 thread_common_ops(ifobject, bufs); 824 817 825 - for (int i = 0; i < num_frames; i++) { 826 - /*send EOT frame */ 827 - if (i == (num_frames - 1)) 828 - data.seqnum = -1; 829 - else 830 - data.seqnum = i; 831 - gen_udp_hdr(&data, ifobject, udp_hdr); 832 - gen_ip_hdr(ifobject, ip_hdr); 833 - gen_udp_csum(udp_hdr, ip_hdr); 834 - gen_eth_hdr(ifobject, eth_hdr); 835 - gen_eth_frame(ifobject->umem, i * XSK_UMEM__DEFAULT_FRAME_SIZE); 836 - } 818 + print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, 819 + ifobject->ifname); 820 + send_pkts(ifobject); 837 821 838 - print_verbose("Sending %d packets on interface %s\n", 839 - (opt_pkt_count - 1), ifobject->ifname); 840 - tx_only_all(ifobject); 822 + if (stat_test_type == STAT_TEST_TX_INVALID) 823 + tx_stats_validate(ifobject); 841 824 842 825 testapp_cleanup_xsk_res(ifobject); 843 826 pthread_exit(NULL); ··· 840 853 if (stat_test_type != STAT_TEST_RX_FILL_EMPTY) 841 854 xsk_populate_fill_ring(ifobject->umem); 842 855 843 - TAILQ_INIT(&head); 844 - if (debug_pkt_dump) { 845 - pkt_buf = calloc(num_frames, sizeof(*pkt_buf)); 846 - if (!pkt_buf) 847 - exit_with_error(errno); 848 - } 849 - 850 856 fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk); 851 857 fds[0].events = POLLIN; 852 858 853 859 pthread_barrier_wait(&barr); 854 860 855 - while (1) { 856 - if (test_type != 
TEST_TYPE_STATS) { 857 - rx_pkt(ifobject->xsk, fds); 858 - worker_pkt_validate(); 859 - } else { 860 - worker_stats_validate(ifobject); 861 - } 862 - if (sigvar) 863 - break; 864 - } 865 - 866 - print_verbose("Received %d packets on interface %s\n", 867 - pkt_counter, ifobject->ifname); 861 + if (test_type == TEST_TYPE_STATS) 862 + while (!rx_stats_are_valid(ifobject)) 863 + continue; 864 + else 865 + receive_pkts(ifobject->pkt_stream, ifobject->xsk, fds); 868 866 869 867 if (test_type == TEST_TYPE_TEARDOWN) 870 868 print_verbose("Destroying socket\n"); ··· 862 890 { 863 891 bool bidi = test_type == TEST_TYPE_BIDI; 864 892 bool bpf = test_type == TEST_TYPE_BPF_RES; 893 + struct pkt_stream *pkt_stream; 865 894 866 895 if (pthread_barrier_init(&barr, NULL, 2)) 867 896 exit_with_error(errno); 897 + 898 + if (stat_test_type == STAT_TEST_TX_INVALID) 899 + pkt_stream = pkt_stream_generate(DEFAULT_PKT_CNT, XSK_UMEM__INVALID_FRAME_SIZE); 900 + else 901 + pkt_stream = pkt_stream_generate(DEFAULT_PKT_CNT, PKT_SIZE); 902 + ifdict_tx->pkt_stream = pkt_stream; 903 + ifdict_rx->pkt_stream = pkt_stream; 868 904 869 905 /*Spawn RX thread */ 870 906 pthread_create(&t0, NULL, ifdict_rx->func_ptr, ifdict_rx); ··· 887 907 pthread_join(t1, NULL); 888 908 pthread_join(t0, NULL); 889 909 890 - if (debug_pkt_dump && test_type != TEST_TYPE_STATS) { 891 - worker_pkt_dump(); 892 - for (int iter = 0; iter < num_frames - 1; iter++) { 893 - free(pkt_buf[iter]->payload); 894 - free(pkt_buf[iter]); 895 - } 896 - free(pkt_buf); 897 - } 898 - 899 910 if (!(test_type == TEST_TYPE_TEARDOWN) && !bidi && !bpf && !(test_type == TEST_TYPE_STATS)) 900 911 print_ksft_result(); 901 912 } ··· 896 925 int i; 897 926 898 927 for (i = 0; i < MAX_TEARDOWN_ITER; i++) { 899 - pkt_counter = 0; 900 - prev_pkt = -1; 901 - sigvar = 0; 902 928 print_verbose("Creating socket\n"); 903 929 testapp_validate(); 904 930 } ··· 921 953 static void testapp_bidi(void) 922 954 { 923 955 for (int i = 0; i < MAX_BIDI_ITER; i++) { 
924 - pkt_counter = 0; 925 - prev_pkt = -1; 926 - sigvar = 0; 927 956 print_verbose("Creating socket\n"); 928 957 testapp_validate(); 929 958 if (!second_step) { ··· 952 987 int i; 953 988 954 989 for (i = 0; i < MAX_BPF_ITER; i++) { 955 - pkt_counter = 0; 956 - prev_pkt = -1; 957 - sigvar = 0; 958 990 print_verbose("Creating socket\n"); 959 991 testapp_validate(); 960 992 if (!second_step) ··· 979 1017 case STAT_TEST_RX_FULL: 980 1018 rxqsize = RX_FULL_RXQSIZE; 981 1019 break; 1020 + case STAT_TEST_TX_INVALID: 1021 + continue; 982 1022 default: 983 1023 break; 984 1024 } ··· 1026 1062 1027 1063 /* reset defaults after potential previous test */ 1028 1064 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; 1029 - pkt_counter = 0; 1030 1065 second_step = 0; 1031 - prev_pkt = -1; 1032 - sigvar = 0; 1033 1066 stat_test_type = -1; 1034 1067 rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 1035 1068 frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; ··· 1063 1102 } 1064 1103 } 1065 1104 1105 + static struct ifobject *ifobject_create(void) 1106 + { 1107 + struct ifobject *ifobj; 1108 + 1109 + ifobj = calloc(1, sizeof(struct ifobject)); 1110 + if (!ifobj) 1111 + return NULL; 1112 + 1113 + ifobj->xsk_arr = calloc(2, sizeof(struct xsk_socket_info *)); 1114 + if (!ifobj->xsk_arr) 1115 + goto out_xsk_arr; 1116 + 1117 + ifobj->umem_arr = calloc(2, sizeof(struct xsk_umem_info *)); 1118 + if (!ifobj->umem_arr) 1119 + goto out_umem_arr; 1120 + 1121 + return ifobj; 1122 + 1123 + out_umem_arr: 1124 + free(ifobj->xsk_arr); 1125 + out_xsk_arr: 1126 + free(ifobj); 1127 + return NULL; 1128 + } 1129 + 1130 + static void ifobject_delete(struct ifobject *ifobj) 1131 + { 1132 + free(ifobj->umem_arr); 1133 + free(ifobj->xsk_arr); 1134 + free(ifobj); 1135 + } 1136 + 1066 1137 int main(int argc, char **argv) 1067 1138 { 1068 1139 struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY }; 1069 - bool failure = false; 1070 1140 int i, j; 1071 1141 1072 1142 if (setrlimit(RLIMIT_MEMLOCK, &_rlim)) 1073 1143 
exit_with_error(errno); 1074 1144 1075 - for (int i = 0; i < MAX_INTERFACES; i++) { 1076 - ifdict[i] = malloc(sizeof(struct ifobject)); 1145 + for (i = 0; i < MAX_INTERFACES; i++) { 1146 + ifdict[i] = ifobject_create(); 1077 1147 if (!ifdict[i]) 1078 - exit_with_error(errno); 1079 - 1080 - ifdict[i]->ifdict_index = i; 1081 - ifdict[i]->xsk_arr = calloc(2, sizeof(struct xsk_socket_info *)); 1082 - if (!ifdict[i]->xsk_arr) { 1083 - failure = true; 1084 - goto cleanup; 1085 - } 1086 - ifdict[i]->umem_arr = calloc(2, sizeof(struct xsk_umem_info *)); 1087 - if (!ifdict[i]->umem_arr) { 1088 - failure = true; 1089 - goto cleanup; 1090 - } 1148 + exit_with_error(ENOMEM); 1091 1149 } 1092 1150 1093 1151 setlocale(LC_ALL, ""); 1094 1152 1095 1153 parse_command_line(argc, argv); 1096 1154 1097 - num_frames = ++opt_pkt_count; 1098 - 1099 - init_iface(ifdict[0], MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, tx); 1100 - init_iface(ifdict[1], MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, rx); 1155 + init_iface(ifdict[tx], MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, tx); 1156 + init_iface(ifdict[rx], MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, rx); 1101 1157 1102 1158 ksft_set_plan(TEST_MODE_MAX * TEST_TYPE_MAX); 1103 1159 1104 - for (i = 0; i < TEST_MODE_MAX; i++) { 1105 - for (j = 0; j < TEST_TYPE_MAX; j++) 1160 + for (i = 0; i < TEST_MODE_MAX; i++) 1161 + for (j = 0; j < TEST_TYPE_MAX; j++) { 1106 1162 run_pkt_test(i, j); 1107 - } 1163 + usleep(USLEEP_MAX); 1164 + } 1108 1165 1109 - cleanup: 1110 - for (int i = 0; i < MAX_INTERFACES; i++) { 1111 - if (ifdict[i]->ns_fd != -1) 1112 - close(ifdict[i]->ns_fd); 1113 - free(ifdict[i]->xsk_arr); 1114 - free(ifdict[i]->umem_arr); 1115 - free(ifdict[i]); 1116 - } 1117 - 1118 - if (failure) 1119 - exit_with_error(errno); 1166 + for (i = 0; i < MAX_INTERFACES; i++) 1167 + ifobject_delete(ifdict[i]); 1120 1168 1121 1169 ksft_exit_pass(); 1122 - 1123 1170 return 0; 1124 1171 }
+21 -42
tools/testing/selftests/bpf/xdpxceiver.h
··· 34 34 #define IP_PKT_TOS 0x9 35 35 #define UDP_PKT_SIZE (IP_PKT_SIZE - sizeof(struct iphdr)) 36 36 #define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr)) 37 - #define EOT (-1) 38 - #define USLEEP_MAX 200000 37 + #define USLEEP_MAX 10000 39 38 #define SOCK_RECONF_CTR 10 40 - #define BATCH_SIZE 64 39 + #define BATCH_SIZE 8 41 40 #define POLL_TMOUT 1000 42 - #define DEFAULT_PKT_CNT 10000 41 + #define DEFAULT_PKT_CNT (4 * 1024) 43 42 #define RX_FULL_RXQSIZE 32 43 + #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1) 44 44 45 45 #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0) 46 46 47 - typedef __u32 u32; 48 - typedef __u16 u16; 49 - typedef __u8 u8; 50 - 51 - enum TEST_MODES { 52 - TEST_MODE_UNCONFIGURED = -1, 47 + enum test_mode { 53 48 TEST_MODE_SKB, 54 49 TEST_MODE_DRV, 55 50 TEST_MODE_MAX 56 51 }; 57 52 58 - enum TEST_TYPES { 53 + enum test_type { 59 54 TEST_TYPE_NOPOLL, 60 55 TEST_TYPE_POLL, 61 56 TEST_TYPE_TEARDOWN, ··· 60 65 TEST_TYPE_MAX 61 66 }; 62 67 63 - enum STAT_TEST_TYPES { 68 + enum stat_test_type { 64 69 STAT_TEST_RX_DROPPED, 65 70 STAT_TEST_TX_INVALID, 66 71 STAT_TEST_RX_FULL, ··· 68 73 STAT_TEST_TYPE_MAX 69 74 }; 70 75 71 - static int configured_mode = TEST_MODE_UNCONFIGURED; 72 - static u8 debug_pkt_dump; 73 - static u32 num_frames; 76 + static int configured_mode; 77 + static bool opt_pkt_dump; 78 + static u32 num_frames = DEFAULT_PKT_CNT / 4; 74 79 static bool second_step; 75 80 static int test_type; 76 81 77 - static int opt_pkt_count; 78 - static u8 opt_verbose; 82 + static bool opt_verbose; 79 83 80 84 static u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; 81 85 static u32 xdp_bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY; 82 - static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE]; 83 - static u32 pkt_counter; 84 - static long prev_pkt = -1; 85 - static int sigvar; 86 86 static int stat_test_type; 87 87 static u32 rxqsize; 88 88 static u32 frame_headroom; ··· 94 104 struct xsk_ring_prod 
tx; 95 105 struct xsk_umem_info *umem; 96 106 struct xsk_socket *xsk; 97 - unsigned long rx_npkts; 98 - unsigned long tx_npkts; 99 - unsigned long prev_rx_npkts; 100 - unsigned long prev_tx_npkts; 101 107 u32 outstanding_tx; 102 108 }; 103 109 ··· 104 118 } vector; 105 119 }; 106 120 107 - struct generic_data { 108 - u32 seqnum; 121 + struct pkt { 122 + u64 addr; 123 + u32 len; 124 + u32 payload; 125 + }; 126 + 127 + struct pkt_stream { 128 + u32 nb_pkts; 129 + struct pkt *pkts; 109 130 }; 110 131 111 132 struct ifobject { ··· 124 131 struct xsk_umem_info *umem; 125 132 void *(*func_ptr)(void *arg); 126 133 struct flow_vector fv; 134 + struct pkt_stream *pkt_stream; 127 135 int ns_fd; 128 - int ifdict_index; 129 136 u32 dst_ip; 130 137 u32 src_ip; 131 138 u16 src_port; ··· 141 148 /*threads*/ 142 149 pthread_barrier_t barr; 143 150 pthread_t t0, t1; 144 - 145 - TAILQ_HEAD(head_s, pkt) head = TAILQ_HEAD_INITIALIZER(head); 146 - struct head_s *head_p; 147 - struct pkt { 148 - char *pkt_frame; 149 - 150 - TAILQ_ENTRY(pkt) pkt_nodes; 151 - } *pkt_node_rx, *pkt_node_rx_q; 152 - 153 - struct pkt_frame { 154 - char *payload; 155 - } *pkt_obj; 156 - 157 - struct pkt_frame **pkt_buf; 158 151 159 152 #endif /* XDPXCEIVER_H */
+7 -23
tools/testing/selftests/bpf/xsk_prereqs.sh
··· 8 8 ksft_xpass=3 9 9 ksft_skip=4 10 10 11 - GREEN='\033[0;92m' 12 - YELLOW='\033[0;93m' 13 - RED='\033[0;31m' 14 - NC='\033[0m' 15 - STACK_LIM=131072 16 11 SPECFILE=veth.spec 17 12 XSKOBJ=xdpxceiver 18 - NUMPKTS=10000 19 13 20 14 validate_root_exec() 21 15 { ··· 44 50 test_status() 45 51 { 46 52 statusval=$1 47 - if [ -n "${colorconsole+set}" ]; then 48 - if [ $statusval -eq 2 ]; then 49 - echo -e "${YELLOW}$2${NC}: [ ${RED}FAIL${NC} ]" 50 - elif [ $statusval -eq 1 ]; then 51 - echo -e "${YELLOW}$2${NC}: [ ${RED}SKIPPED${NC} ]" 52 - elif [ $statusval -eq 0 ]; then 53 - echo -e "${YELLOW}$2${NC}: [ ${GREEN}PASS${NC} ]" 54 - fi 55 - else 56 - if [ $statusval -eq 2 ]; then 57 - echo -e "$2: [ FAIL ]" 58 - elif [ $statusval -eq 1 ]; then 59 - echo -e "$2: [ SKIPPED ]" 60 - elif [ $statusval -eq 0 ]; then 61 - echo -e "$2: [ PASS ]" 62 - fi 53 + if [ $statusval -eq 2 ]; then 54 + echo -e "$2: [ FAIL ]" 55 + elif [ $statusval -eq 1 ]; then 56 + echo -e "$2: [ SKIPPED ]" 57 + elif [ $statusval -eq 0 ]; then 58 + echo -e "$2: [ PASS ]" 63 59 fi 64 60 } 65 61 ··· 91 107 92 108 execxdpxceiver() 93 109 { 94 - ./${XSKOBJ} -i ${VETH0} -i ${VETH1},${NS1} -C ${NUMPKTS} ${VERBOSE_ARG} ${DUMP_PKTS_ARG} 110 + ./${XSKOBJ} -i ${VETH0} -i ${VETH1},${NS1} ${VERBOSE_ARG} ${DUMP_PKTS_ARG} 95 111 }