Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: use ALIGN() and round_up() macros

Improve code readability by using existing macros:

Replace hardcoded alignment computations (e.g. (len + 7) & ~0x7) with
ALIGN()/IS_ALIGNED() macros.

Also replace (DIV_ROUND_UP(len, 8) * 8) with ALIGN(len, 8), which, if
not optimized by the compiler, has the overhead of a multiplication
and a division. Do the same for roundup() by replacing it with round_up()
(the division-less version, which requires the multiple to be a power of 2
— always the case for us).

Also remove some unnecessary checks where !IS_ALIGNED() would fit;
calling round_up() directly is fine, since it is a no-op when the value
is already aligned.

Signed-off-by: Enzo Matsumiya <ematsumiya@suse.de>
Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
Signed-off-by: Steve French <stfrench@microsoft.com>

Authored by Enzo Matsumiya and committed by Steve French
d7173623 e4029e07

+33 -38
+1 -1
fs/cifs/cifssmb.c
··· 2305 2305 remap); 2306 2306 } 2307 2307 rename_info->target_name_len = cpu_to_le32(2 * len_of_str); 2308 - count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str); 2308 + count = sizeof(struct set_file_rename) + (2 * len_of_str); 2309 2309 byte_count += count; 2310 2310 pSMB->DataCount = cpu_to_le16(count); 2311 2311 pSMB->TotalDataCount = pSMB->DataCount;
+9 -2
fs/cifs/connect.c
··· 2832 2832 * sessinit is sent but no second negprot 2833 2833 */ 2834 2834 struct rfc1002_session_packet *ses_init_buf; 2835 + unsigned int req_noscope_len; 2835 2836 struct smb_hdr *smb_buf; 2837 + 2836 2838 ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), 2837 2839 GFP_KERNEL); 2840 + 2838 2841 if (ses_init_buf) { 2839 2842 ses_init_buf->trailer.session_req.called_len = 32; 2840 2843 ··· 2873 2870 ses_init_buf->trailer.session_req.scope2 = 0; 2874 2871 smb_buf = (struct smb_hdr *)ses_init_buf; 2875 2872 2876 - /* sizeof RFC1002_SESSION_REQUEST with no scope */ 2877 - smb_buf->smb_buf_length = cpu_to_be32(0x81000044); 2873 + /* sizeof RFC1002_SESSION_REQUEST with no scopes */ 2874 + req_noscope_len = sizeof(struct rfc1002_session_packet) - 2; 2875 + 2876 + /* == cpu_to_be32(0x81000044) */ 2877 + smb_buf->smb_buf_length = 2878 + cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | req_noscope_len); 2878 2879 rc = smb_send(server, smb_buf, 0x44); 2879 2880 kfree(ses_init_buf); 2880 2881 /*
+6 -12
fs/cifs/sess.c
··· 601 601 /* BB FIXME add check that strings total less 602 602 than 335 or will need to send them as arrays */ 603 603 604 - /* unicode strings, must be word aligned before the call */ 605 - /* if ((long) bcc_ptr % 2) { 606 - *bcc_ptr = 0; 607 - bcc_ptr++; 608 - } */ 609 604 /* copy user */ 610 605 if (ses->user_name == NULL) { 611 606 /* null user mount */ ··· 1319 1324 } 1320 1325 1321 1326 if (ses->capabilities & CAP_UNICODE) { 1322 - if (sess_data->iov[0].iov_len % 2) { 1327 + if (!IS_ALIGNED(sess_data->iov[0].iov_len, 2)) { 1323 1328 *bcc_ptr = 0; 1324 1329 bcc_ptr++; 1325 1330 } ··· 1359 1364 /* no string area to decode, do nothing */ 1360 1365 } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { 1361 1366 /* unicode string area must be word-aligned */ 1362 - if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { 1367 + if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) { 1363 1368 ++bcc_ptr; 1364 1369 --bytes_remaining; 1365 1370 } ··· 1443 1448 1444 1449 if (ses->capabilities & CAP_UNICODE) { 1445 1450 /* unicode strings must be word aligned */ 1446 - if ((sess_data->iov[0].iov_len 1447 - + sess_data->iov[1].iov_len) % 2) { 1451 + if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) { 1448 1452 *bcc_ptr = 0; 1449 1453 bcc_ptr++; 1450 1454 } ··· 1494 1500 /* no string area to decode, do nothing */ 1495 1501 } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { 1496 1502 /* unicode string area must be word-aligned */ 1497 - if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { 1503 + if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) { 1498 1504 ++bcc_ptr; 1499 1505 --bytes_remaining; 1500 1506 } ··· 1546 1552 1547 1553 bcc_ptr = sess_data->iov[2].iov_base; 1548 1554 /* unicode strings must be word aligned */ 1549 - if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) { 1555 + if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) { 1550 1556 *bcc_ptr = 0; 1551 
1557 bcc_ptr++; 1552 1558 } ··· 1747 1753 /* no string area to decode, do nothing */ 1748 1754 } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { 1749 1755 /* unicode string area must be word-aligned */ 1750 - if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { 1756 + if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) { 1751 1757 ++bcc_ptr; 1752 1758 --bytes_remaining; 1753 1759 }
+1 -1
fs/cifs/smb2misc.c
··· 248 248 * Some windows servers (win2016) will pad also the final 249 249 * PDU in a compound to 8 bytes. 250 250 */ 251 - if (((calc_len + 7) & ~7) == len) 251 + if (ALIGN(calc_len, 8) == len) 252 252 return 0; 253 253 254 254 /*
+16 -22
fs/cifs/smb2pdu.c
··· 466 466 /* 467 467 * Context Data length must be rounded to multiple of 8 for some servers 468 468 */ 469 - pneg_ctxt->DataLength = cpu_to_le16(DIV_ROUND_UP( 470 - sizeof(struct smb2_signing_capabilities) - 471 - sizeof(struct smb2_neg_context) + 472 - (num_algs * 2 /* sizeof u16 */), 8) * 8); 469 + pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) - 470 + sizeof(struct smb2_neg_context) + 471 + (num_algs * sizeof(u16)), 8)); 473 472 pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs); 474 473 pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC); 475 474 476 - ctxt_len += 2 /* sizeof le16 */ * num_algs; 477 - ctxt_len = DIV_ROUND_UP(ctxt_len, 8) * 8; 475 + ctxt_len += sizeof(__le16) * num_algs; 476 + ctxt_len = ALIGN(ctxt_len, 8); 478 477 return ctxt_len; 479 478 /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */ 480 479 } ··· 510 511 /* copy up to max of first 100 bytes of server name to NetName field */ 511 512 pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp)); 512 513 /* context size is DataLength + minimal smb2_neg_context */ 513 - return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) + 514 - sizeof(struct smb2_neg_context), 8) * 8; 514 + return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8); 515 515 } 516 516 517 517 static void ··· 555 557 * round up total_len of fixed part of SMB3 negotiate request to 8 556 558 * byte boundary before adding negotiate contexts 557 559 */ 558 - *total_len = roundup(*total_len, 8); 560 + *total_len = ALIGN(*total_len, 8); 559 561 560 562 pneg_ctxt = (*total_len) + (char *)req; 561 563 req->NegotiateContextOffset = cpu_to_le32(*total_len); 562 564 563 565 build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt); 564 - ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8; 566 + ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8); 565 567 
*total_len += ctxt_len; 566 568 pneg_ctxt += ctxt_len; 567 569 568 570 build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); 569 - ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8; 571 + ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8); 570 572 *total_len += ctxt_len; 571 573 pneg_ctxt += ctxt_len; 572 574 ··· 593 595 if (server->compress_algorithm) { 594 596 build_compression_ctxt((struct smb2_compression_capabilities_context *) 595 597 pneg_ctxt); 596 - ctxt_len = DIV_ROUND_UP( 597 - sizeof(struct smb2_compression_capabilities_context), 598 - 8) * 8; 598 + ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8); 599 599 *total_len += ctxt_len; 600 600 pneg_ctxt += ctxt_len; 601 601 neg_context_count++; ··· 776 780 if (rc) 777 781 break; 778 782 /* offsets must be 8 byte aligned */ 779 - clen = (clen + 7) & ~0x7; 783 + clen = ALIGN(clen, 8); 780 784 offset += clen + sizeof(struct smb2_neg_context); 781 785 len_of_ctxts -= clen; 782 786 } ··· 2422 2426 unsigned int group_offset = 0; 2423 2427 struct smb3_acl acl; 2424 2428 2425 - *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); 2429 + *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); 2426 2430 2427 2431 if (set_owner) { 2428 2432 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */ ··· 2496 2500 memcpy(aclptr, &acl, sizeof(struct smb3_acl)); 2497 2501 2498 2502 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2499 - *len = roundup(ptr - (__u8 *)buf, 8); 2503 + *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8); 2500 2504 2501 2505 return buf; 2502 2506 } ··· 2590 2594 * final path needs to be 8-byte aligned as specified in 2591 2595 * MS-SMB2 2.2.13 SMB2 CREATE Request. 
2592 2596 */ 2593 - *out_size = roundup(*out_len * sizeof(__le16), 8); 2597 + *out_size = round_up(*out_len * sizeof(__le16), 8); 2594 2598 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL); 2595 2599 if (!*out_path) 2596 2600 return -ENOMEM; ··· 2835 2839 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2; 2836 2840 /* MUST set path len (NameLength) to 0 opening root of share */ 2837 2841 req->NameLength = cpu_to_le16(uni_path_len - 2); 2838 - copy_size = uni_path_len; 2839 - if (copy_size % 8 != 0) 2840 - copy_size = roundup(copy_size, 8); 2842 + copy_size = round_up(uni_path_len, 8); 2841 2843 copy_path = kzalloc(copy_size, GFP_KERNEL); 2842 2844 if (!copy_path) 2843 2845 return -ENOMEM; ··· 4097 4103 if (request_type & CHAINED_REQUEST) { 4098 4104 if (!(request_type & END_OF_CHAIN)) { 4099 4105 /* next 8-byte aligned request */ 4100 - *total_len = DIV_ROUND_UP(*total_len, 8) * 8; 4106 + *total_len = ALIGN(*total_len, 8); 4101 4107 shdr->NextCommand = cpu_to_le32(*total_len); 4102 4108 } else /* END_OF_CHAIN */ 4103 4109 shdr->NextCommand = 0;