Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: tcrypt - include 1420 byte blocks in aead and skcipher benchmarks

WireGuard and IPsec both typically operate on input blocks that are
~1420 bytes in size, given the default Ethernet MTU of 1500 bytes and
the overhead of the VPN metadata.

Many aead and skcipher implementations are optimized for power-of-2
block sizes, and whether they perform well when operating on 1420
byte blocks cannot be easily extrapolated from the performance on
power-of-2 block sizes. So let's add 1420 bytes explicitly, and round
it up to the next blocksize multiple of the algo in question if it
does not support 1420 byte blocks.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Ard Biesheuvel and committed by
Herbert Xu
ad6d66bc 00ea27f1

+44 -37
+44 -37
crypto/tcrypt.c
··· 77 77 NULL 78 78 }; 79 79 80 - static u32 block_sizes[] = { 16, 64, 256, 1024, 1472, 8192, 0 }; 81 - static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; 80 + static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 }; 81 + static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 }; 82 82 83 83 #define XBUFSIZE 8 84 84 #define MAX_IVLEN 32 ··· 256 256 struct test_mb_aead_data *data; 257 257 struct crypto_aead *tfm; 258 258 unsigned int i, j, iv_len; 259 + const int *b_size; 259 260 const char *key; 260 261 const char *e; 261 262 void *assoc; 262 - u32 *b_size; 263 263 char *iv; 264 264 int ret; 265 265 ··· 337 337 do { 338 338 b_size = aead_sizes; 339 339 do { 340 - if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) { 340 + int bs = round_up(*b_size, crypto_aead_blocksize(tfm)); 341 + 342 + if (bs + authsize > XBUFSIZE * PAGE_SIZE) { 341 343 pr_err("template (%u) too big for buffer (%lu)\n", 342 - authsize + *b_size, 344 + authsize + bs, 343 345 XBUFSIZE * PAGE_SIZE); 344 346 goto out; 345 347 } 346 348 347 349 pr_info("test %u (%d bit key, %d byte blocks): ", i, 348 - *keysize * 8, *b_size); 350 + *keysize * 8, bs); 349 351 350 352 /* Set up tfm global state, i.e. the key */ 351 353 ··· 382 380 memset(assoc, 0xff, aad_size); 383 381 384 382 sg_init_aead(cur->sg, cur->xbuf, 385 - *b_size + (enc ? 0 : authsize), 383 + bs + (enc ? 0 : authsize), 386 384 assoc, aad_size); 387 385 388 386 sg_init_aead(cur->sgout, cur->xoutbuf, 389 - *b_size + (enc ? authsize : 0), 387 + bs + (enc ? 
authsize : 0), 390 388 assoc, aad_size); 391 389 392 390 aead_request_set_ad(cur->req, aad_size); ··· 396 394 aead_request_set_crypt(cur->req, 397 395 cur->sgout, 398 396 cur->sg, 399 - *b_size, iv); 397 + bs, iv); 400 398 ret = crypto_aead_encrypt(cur->req); 401 399 ret = do_one_aead_op(cur->req, ret); 402 400 ··· 408 406 } 409 407 410 408 aead_request_set_crypt(cur->req, cur->sg, 411 - cur->sgout, *b_size + 409 + cur->sgout, bs + 412 410 (enc ? 0 : authsize), 413 411 iv); 414 412 415 413 } 416 414 417 415 if (secs) { 418 - ret = test_mb_aead_jiffies(data, enc, *b_size, 416 + ret = test_mb_aead_jiffies(data, enc, bs, 419 417 secs, num_mb); 420 418 cond_resched(); 421 419 } else { 422 - ret = test_mb_aead_cycles(data, enc, *b_size, 420 + ret = test_mb_aead_cycles(data, enc, bs, 423 421 num_mb); 424 422 } 425 423 ··· 536 534 char *xbuf[XBUFSIZE]; 537 535 char *xoutbuf[XBUFSIZE]; 538 536 char *axbuf[XBUFSIZE]; 539 - unsigned int *b_size; 537 + const int *b_size; 540 538 unsigned int iv_len; 541 539 struct crypto_wait wait; 542 540 ··· 592 590 do { 593 591 b_size = aead_sizes; 594 592 do { 593 + u32 bs = round_up(*b_size, crypto_aead_blocksize(tfm)); 594 + 595 595 assoc = axbuf[0]; 596 596 memset(assoc, 0xff, aad_size); 597 597 598 - if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { 598 + if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) { 599 599 pr_err("template (%u) too big for tvmem (%lu)\n", 600 - *keysize + *b_size, 600 + *keysize + bs, 601 601 TVMEMSIZE * PAGE_SIZE); 602 602 goto out; 603 603 } ··· 620 616 621 617 crypto_aead_clear_flags(tfm, ~0); 622 618 printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ", 623 - i, *keysize * 8, *b_size); 619 + i, *keysize * 8, bs); 624 620 625 621 626 622 memset(tvmem[0], 0xff, PAGE_SIZE); ··· 631 627 goto out; 632 628 } 633 629 634 - sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize), 630 + sg_init_aead(sg, xbuf, bs + (enc ? 
0 : authsize), 635 631 assoc, aad_size); 636 632 637 633 sg_init_aead(sgout, xoutbuf, 638 - *b_size + (enc ? authsize : 0), assoc, 634 + bs + (enc ? authsize : 0), assoc, 639 635 aad_size); 640 636 641 637 aead_request_set_ad(req, aad_size); ··· 648 644 * reversed (input <-> output) to calculate it 649 645 */ 650 646 aead_request_set_crypt(req, sgout, sg, 651 - *b_size, iv); 647 + bs, iv); 652 648 ret = do_one_aead_op(req, 653 649 crypto_aead_encrypt(req)); 654 650 ··· 660 656 } 661 657 662 658 aead_request_set_crypt(req, sg, sgout, 663 - *b_size + (enc ? 0 : authsize), 659 + bs + (enc ? 0 : authsize), 664 660 iv); 665 661 666 662 if (secs) { 667 - ret = test_aead_jiffies(req, enc, *b_size, 663 + ret = test_aead_jiffies(req, enc, bs, 668 664 secs); 669 665 cond_resched(); 670 666 } else { 671 - ret = test_aead_cycles(req, enc, *b_size); 667 + ret = test_aead_cycles(req, enc, bs); 672 668 } 673 669 674 670 if (ret) { ··· 1257 1253 struct test_mb_skcipher_data *data; 1258 1254 struct crypto_skcipher *tfm; 1259 1255 unsigned int i, j, iv_len; 1256 + const int *b_size; 1260 1257 const char *key; 1261 1258 const char *e; 1262 - u32 *b_size; 1263 1259 char iv[128]; 1264 1260 int ret; 1265 1261 ··· 1320 1316 do { 1321 1317 b_size = block_sizes; 1322 1318 do { 1323 - if (*b_size > XBUFSIZE * PAGE_SIZE) { 1319 + u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm)); 1320 + 1321 + if (bs > XBUFSIZE * PAGE_SIZE) { 1324 1322 pr_err("template (%u) too big for buffer (%lu)\n", 1325 1323 *b_size, XBUFSIZE * PAGE_SIZE); 1326 1324 goto out; 1327 1325 } 1328 1326 1329 1327 pr_info("test %u (%d bit key, %d byte blocks): ", i, 1330 - *keysize * 8, *b_size); 1328 + *keysize * 8, bs); 1331 1329 1332 1330 /* Set up tfm global state, i.e. 
the key */ 1333 1331 ··· 1359 1353 1360 1354 for (j = 0; j < num_mb; ++j) { 1361 1355 struct test_mb_skcipher_data *cur = &data[j]; 1362 - unsigned int k = *b_size; 1356 + unsigned int k = bs; 1363 1357 unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE); 1364 1358 unsigned int p = 0; 1365 1359 ··· 1383 1377 1384 1378 if (secs) { 1385 1379 ret = test_mb_acipher_jiffies(data, enc, 1386 - *b_size, secs, 1380 + bs, secs, 1387 1381 num_mb); 1388 1382 cond_resched(); 1389 1383 } else { 1390 1384 ret = test_mb_acipher_cycles(data, enc, 1391 - *b_size, num_mb); 1385 + bs, num_mb); 1392 1386 } 1393 1387 1394 1388 if (ret) { ··· 1503 1497 char iv[128]; 1504 1498 struct skcipher_request *req; 1505 1499 struct crypto_skcipher *tfm; 1500 + const int *b_size; 1506 1501 const char *e; 1507 - u32 *b_size; 1508 1502 1509 1503 if (enc == ENCRYPT) 1510 1504 e = "encryption"; ··· 1539 1533 b_size = block_sizes; 1540 1534 1541 1535 do { 1536 + u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm)); 1542 1537 struct scatterlist sg[TVMEMSIZE]; 1543 1538 1544 - if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { 1539 + if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) { 1545 1540 pr_err("template (%u) too big for " 1546 - "tvmem (%lu)\n", *keysize + *b_size, 1541 + "tvmem (%lu)\n", *keysize + bs, 1547 1542 TVMEMSIZE * PAGE_SIZE); 1548 1543 goto out_free_req; 1549 1544 } 1550 1545 1551 1546 pr_info("test %u (%d bit key, %d byte blocks): ", i, 1552 - *keysize * 8, *b_size); 1547 + *keysize * 8, bs); 1553 1548 1554 1549 memset(tvmem[0], 0xff, PAGE_SIZE); 1555 1550 ··· 1572 1565 goto out_free_req; 1573 1566 } 1574 1567 1575 - k = *keysize + *b_size; 1568 + k = *keysize + bs; 1576 1569 sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE)); 1577 1570 1578 1571 if (k > PAGE_SIZE) { ··· 1589 1582 sg_set_buf(sg + j, tvmem[j], k); 1590 1583 memset(tvmem[j], 0xff, k); 1591 1584 } else { 1592 - sg_set_buf(sg, tvmem[0] + *keysize, *b_size); 1585 + sg_set_buf(sg, tvmem[0] + *keysize, bs); 1593 1586 } 1594 1587 
1595 1588 iv_len = crypto_skcipher_ivsize(tfm); 1596 1589 if (iv_len) 1597 1590 memset(&iv, 0xff, iv_len); 1598 1591 1599 - skcipher_request_set_crypt(req, sg, sg, *b_size, iv); 1592 + skcipher_request_set_crypt(req, sg, sg, bs, iv); 1600 1593 1601 1594 if (secs) { 1602 1595 ret = test_acipher_jiffies(req, enc, 1603 - *b_size, secs); 1596 + bs, secs); 1604 1597 cond_resched(); 1605 1598 } else { 1606 1599 ret = test_acipher_cycles(req, enc, 1607 - *b_size); 1600 + bs); 1608 1601 } 1609 1602 1610 1603 if (ret) {