Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[CRYPTO] users: Fix up scatterlist conversion errors

This patch fixes the errors made in the users of the crypto layer during
the sg_init_table conversion. It also adds a few conversions that were
missing altogether.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Herbert Xu and committed by David S. Miller
(commit 68e3f5dd, parent a5a613a4).

+93 -58
+2 -2
drivers/crypto/padlock-sha.c
@@ -55,7 +55,7 @@
 	if (ctx(tfm)->data && ctx(tfm)->used) {
 		struct scatterlist sg;
 
-		sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
+		sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
 		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
 	}
 
@@ -79,7 +79,7 @@
 
 	if (unlikely(ctx(tfm)->bypass)) {
 		struct scatterlist sg;
-		sg_set_buf(&sg, (uint8_t *)data, length);
+		sg_init_one(&sg, (uint8_t *)data, length);
 		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
 		return;
 	}
+1 -1
drivers/md/dm-crypt.c
@@ -168,7 +168,7 @@
 		return -ENOMEM;
 	}
 
-	sg_set_buf(&sg, cc->key, cc->key_size);
+	sg_init_one(&sg, cc->key, cc->key_size);
 	desc.tfm = hash_tfm;
 	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
+9 -1
drivers/net/ppp_mppe.c
@@ -68,7 +68,7 @@
 static unsigned int
 setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
 {
-	sg_init_one(sg, address, length);
+	sg_set_buf(sg, address, length);
 	return length;
 }
 
@@ -140,6 +140,8 @@
 	struct scatterlist sg[4];
 	unsigned int nbytes;
 
+	sg_init_table(sg, 4);
+
 	nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
 	nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
 			   sizeof(sha_pad->sha_pad1));
@@ -168,6 +166,8 @@
 	if (!initial_key) {
 		crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
 					state->keylen);
+		sg_init_table(sg_in, 1);
+		sg_init_table(sg_out, 1);
 		setup_sg(sg_in, state->sha1_digest, state->keylen);
 		setup_sg(sg_out, state->session_key, state->keylen);
 		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
@@ -425,6 +421,8 @@
 	isize -= 2;
 
 	/* Encrypt packet */
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 1);
 	setup_sg(sg_in, ibuf, isize);
 	setup_sg(sg_out, obuf, osize);
 	if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
@@ -614,6 +608,8 @@
 	 * Decrypt the first byte in order to check if it is
 	 * a compressed or uncompressed protocol field.
 	 */
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 1);
 	setup_sg(sg_in, ibuf, 1);
 	setup_sg(sg_out, obuf, 1);
 	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
+2 -3
drivers/scsi/iscsi_tcp.c
@@ -674,9 +674,8 @@
 {
 	struct scatterlist temp;
 
-	memcpy(&temp, sg, sizeof(struct scatterlist));
-	temp.offset = offset;
-	temp.length = length;
+	sg_init_table(&temp, 1);
+	sg_set_page(&temp, sg_page(sg), length, offset);
 	crypto_hash_update(desc, &temp, length);
 }
 
+2
fs/ecryptfs/crypto.c
@@ -279,6 +279,8 @@
 	int offset;
 	int remainder_of_page;
 
+	sg_init_table(sg, sg_size);
+
 	while (size > 0 && i < sg_size) {
 		pg = virt_to_page(addr);
 		offset = offset_in_page(addr);
+5 -2
net/ipv4/esp4.c
@@ -111,7 +111,9 @@
 		goto unlock;
 	}
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data +
+					      esp->conf.ivlen -
+					      skb->data, clen));
 	err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
@@ -205,7 +203,8 @@
 		goto out;
 	}
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen,
+				     elen));
 	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
+6 -2
net/ipv6/esp6.c
@@ -110,7 +110,9 @@
 		goto unlock;
 	}
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data +
+					      esp->conf.ivlen -
+					      skb->data, clen));
 	err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
@@ -209,7 +207,9 @@
 		}
 	}
 	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg,
+				     sizeof(*esph) + esp->conf.ivlen,
+				     elen));
 	ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
 	if (unlikely(sg != &esp->sgbuf[0]))
 		kfree(sg);
+33 -33
net/rxrpc/rxkad.c
@@ -135,9 +135,8 @@
 	tmpbuf.x[2] = 0;
 	tmpbuf.x[3] = htonl(conn->security_ix);
 
-	memset(sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
@@ -179,9 +180,8 @@
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	memset(sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));
@@ -225,9 +227,8 @@
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	memset(sg, 0, sizeof(sg[0]) * 2);
-	sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr));
-	sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr));
+	sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
+	sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
 
 	/* we want to encrypt the skbuff in-place */
@@ -237,7 +240,7 @@
 	len = data_size + call->conn->size_align - 1;
 	len &= ~(call->conn->size_align - 1);
 
-	skb_to_sgvec(skb, sg, 0, len);
+	sg_init_table(sg, skb_to_sgvec(skb, sg, 0, len));
 	crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
 
 	_leave(" = 0");
@@ -287,9 +290,8 @@
 	tmpbuf.x[0] = sp->hdr.callNumber;
 	tmpbuf.x[1] = x;
 
-	memset(&sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	x = ntohl(tmpbuf.x[1]);
@@ -328,20 +332,23 @@
 	struct rxrpc_skb_priv *sp;
 	struct blkcipher_desc desc;
 	struct rxrpc_crypt iv;
-	struct scatterlist sg[2];
+	struct scatterlist sg[16];
 	struct sk_buff *trailer;
 	u32 data_size, buf;
 	u16 check;
+	int nsg;
 
 	_enter("");
 
 	sp = rxrpc_skb(skb);
 
 	/* we want to decrypt the skbuff in-place */
-	if (skb_cow_data(skb, 0, &trailer) < 0)
+	nsg = skb_cow_data(skb, 0, &trailer);
+	if (nsg < 0 || nsg > 16)
 		goto nomem;
 
-	skb_to_sgvec(skb, sg, 0, 8);
+	sg_init_table(sg, nsg);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, 8));
 
 	/* start the decryption afresh */
 	memset(&iv, 0, sizeof(iv));
@@ -425,7 +426,8 @@
 		goto nomem;
 	}
 
-	skb_to_sgvec(skb, sg, 0, skb->len);
+	sg_init_table(sg, nsg);
+	sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, skb->len));
 
 	/* decrypt from the session key */
 	payload = call->conn->key->payload.data;
@@ -521,9 +521,8 @@
 	tmpbuf.x[0] = call->call_id;
 	tmpbuf.x[1] = x;
 
-	memset(&sg, 0, sizeof(sg));
-	sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf));
-	sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
+	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
 	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
 
 	x = ntohl(tmpbuf.x[1]);
@@ -689,15 +690,19 @@
 static void rxkad_sg_set_buf2(struct scatterlist sg[2],
 			      void *buf, size_t buflen)
 {
+	int nsg = 1;
 
-	memset(sg, 0, sizeof(sg));
+	sg_init_table(sg, 2);
 
 	sg_set_buf(&sg[0], buf, buflen);
 	if (sg[0].offset + buflen > PAGE_SIZE) {
 		/* the buffer was split over two pages */
 		sg[0].length = PAGE_SIZE - sg[0].offset;
 		sg_set_buf(&sg[1], buf + sg[0].length,
			   buflen - sg[0].length);
+		nsg++;
 	}
+
+	sg_mark_end(sg, nsg);
 
 	ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
 }
@@ -715,7 +712,7 @@
 {
 	struct blkcipher_desc desc;
 	struct rxrpc_crypt iv;
-	struct scatterlist ssg[2], dsg[2];
+	struct scatterlist sg[2];
 
 	/* continue encrypting from where we left off */
 	memcpy(&iv, s2->session_key, sizeof(iv));
@@ -723,9 +720,8 @@
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
-	memcpy(dsg, ssg, sizeof(dsg));
-	crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
+	rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
+	crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
 }
 
 /*
@@ -819,7 +817,7 @@
 {
 	struct blkcipher_desc desc;
 	struct rxrpc_crypt iv, key;
-	struct scatterlist ssg[1], dsg[1];
+	struct scatterlist sg[1];
 	struct in_addr addr;
 	unsigned life;
 	time_t issue, now;
@@ -852,9 +850,8 @@
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	sg_init_one(&ssg[0], ticket, ticket_len);
-	memcpy(dsg, ssg, sizeof(dsg));
-	crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len);
+	sg_init_one(&sg[0], ticket, ticket_len);
+	crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len);
 
 	p = ticket;
 	end = p + ticket_len;
@@ -962,7 +961,7 @@
 			       const struct rxrpc_crypt *session_key)
 {
 	struct blkcipher_desc desc;
-	struct scatterlist ssg[2], dsg[2];
+	struct scatterlist sg[2];
 	struct rxrpc_crypt iv;
 
 	_enter(",,%08x%08x",
@@ -980,9 +979,8 @@
 	desc.info = iv.x;
 	desc.flags = 0;
 
-	rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted));
-	memcpy(dsg, ssg, sizeof(dsg));
-	crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted));
+	rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
+	crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
 	mutex_unlock(&rxkad_ci_mutex);
 
 	_leave("");
+1 -2
net/sctp/auth.c
@@ -726,8 +726,7 @@
 
 	/* set up scatter list */
 	end = skb_tail_pointer(skb);
-	sg_init_table(&sg, 1);
-	sg_set_buf(&sg, auth, end - (unsigned char *)auth);
+	sg_init_one(&sg, auth, end - (unsigned char *)auth);
 
 	desc.tfm = asoc->ep->auth_hmacs[hmac_id];
 	desc.flags = 0;
+2 -4
net/sctp/sm_make_chunk.c
@@ -1513,8 +1513,7 @@
 	struct hash_desc desc;
 
 	/* Sign the message. */
-	sg_init_table(&sg, 1);
-	sg_set_buf(&sg, &cookie->c, bodysize);
+	sg_init_one(&sg, &cookie->c, bodysize);
 	keylen = SCTP_SECRET_SIZE;
 	key = (char *)ep->secret_key[ep->current_key];
 	desc.tfm = sctp_sk(ep->base.sk)->hmac;
@@ -1583,8 +1584,7 @@
 
 	/* Check the signature. */
 	keylen = SCTP_SECRET_SIZE;
-	sg_init_table(&sg, 1);
-	sg_set_buf(&sg, bear_cookie, bodysize);
+	sg_init_one(&sg, bear_cookie, bodysize);
 	key = (char *)ep->secret_key[ep->current_key];
 	desc.tfm = sctp_sk(ep->base.sk)->hmac;
 	desc.flags = 0;
+27 -7
net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -75,7 +75,7 @@
 	memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
-	sg_set_buf(sg, out, length);
+	sg_init_one(sg, out, length);
 
 	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
 out:
@@ -110,7 +110,7 @@
 	memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
 
 	memcpy(out, in, length);
-	sg_set_buf(sg, out, length);
+	sg_init_one(sg, out, length);
 
 	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
 out:
@@ -146,7 +146,7 @@
 	err = crypto_hash_init(&desc);
 	if (err)
 		goto out;
-	sg_set_buf(sg, header, hdrlen);
+	sg_init_one(sg, header, hdrlen);
 	err = crypto_hash_update(&desc, sg, hdrlen);
 	if (err)
 		goto out;
@@ -188,8 +188,6 @@
 	/* Worst case is 4 fragments: head, end of page 1, start
 	 * of page 2, tail. Anything more is a bug. */
 	BUG_ON(desc->fragno > 3);
-	desc->infrags[desc->fragno] = *sg;
-	desc->outfrags[desc->fragno] = *sg;
 
 	page_pos = desc->pos - outbuf->head[0].iov_len;
 	if (page_pos >= 0 && page_pos < outbuf->page_len) {
@@ -197,7 +199,10 @@
 	} else {
 		in_page = sg_page(sg);
 	}
-	sg_assign_page(&desc->infrags[desc->fragno], in_page);
+	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
+		    sg->offset);
+	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
+		    sg->offset);
 	desc->fragno++;
 	desc->fraglen += sg->length;
 	desc->pos += sg->length;
@@ -211,9 +210,16 @@
 	if (thislen == 0)
 		return 0;
 
+	sg_mark_end(desc->infrags, desc->fragno);
+	sg_mark_end(desc->outfrags, desc->fragno);
+
 	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
 					  desc->infrags, thislen);
 	if (ret)
 		return ret;
+
+	sg_init_table(desc->infrags, 4);
+	sg_init_table(desc->outfrags, 4);
+
 	if (fraglen) {
 		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
 			    sg->offset + sg->length - fraglen);
@@ -255,6 +247,9 @@
 	desc.fragno = 0;
 	desc.fraglen = 0;
 
+	sg_init_table(desc.infrags, 4);
+	sg_init_table(desc.outfrags, 4);
+
 	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
 	return ret;
 }
@@ -282,7 +271,8 @@
 	/* Worst case is 4 fragments: head, end of page 1, start
 	 * of page 2, tail. Anything more is a bug. */
 	BUG_ON(desc->fragno > 3);
-	desc->frags[desc->fragno] = *sg;
+	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
+		    sg->offset);
 	desc->fragno++;
 	desc->fraglen += sg->length;
 
@@ -293,10 +281,15 @@
 	if (thislen == 0)
 		return 0;
 
+	sg_mark_end(desc->frags, desc->fragno);
+
 	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
 					  desc->frags, thislen);
 	if (ret)
 		return ret;
+
+	sg_init_table(desc->frags, 4);
+
 	if (fraglen) {
 		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
 			    sg->offset + sg->length - fraglen);
@@ -329,6 +312,9 @@
 	desc.desc.flags = 0;
 	desc.fragno = 0;
 	desc.fraglen = 0;
+
+	sg_init_table(desc.frags, 4);
+
 	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
 }
 
+1 -1
net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -173,7 +173,7 @@
 	if (err)
 		goto out;
 
-	sg_set_buf(sg, header, hdrlen);
+	sg_init_one(sg, header, hdrlen);
 	crypto_hash_update(&desc, sg, sg->length);
 
 	xdr_process_buf(body, body_offset, body->len - body_offset,
+2
net/sunrpc/xdr.c
@@ -1030,6 +1030,8 @@
 	unsigned page_len, thislen, page_offset;
 	struct scatterlist sg[1];
 
+	sg_init_table(sg, 1);
+
 	if (offset >= buf->head[0].iov_len) {
 		offset -= buf->head[0].iov_len;
 	} else {