Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
Snapshot at tag v2.6.32 — DST crypto source file, 731 lines, 16 kB (raw view)
/*
 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bio.h>
#include <linux/crypto.h>
#include <linux/dst.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Tricky bastard, but IV can be more complex with time...
 *
 * Currently the cipher IV is just the 64-bit transaction generation
 * number, so both peers derive the same IV for a given transaction
 * without transferring it explicitly.
 */
static inline u64 dst_gen_iv(struct dst_trans *t)
{
	return t->gen;
}

/*
 * Crypto machinery: hash/cipher support for the given crypto controls.
 */
33 */ 34static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key) 35{ 36 int err; 37 struct crypto_hash *hash; 38 39 hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC); 40 if (IS_ERR(hash)) { 41 err = PTR_ERR(hash); 42 dprintk("%s: failed to allocate hash '%s', err: %d.\n", 43 __func__, ctl->hash_algo, err); 44 goto err_out_exit; 45 } 46 47 ctl->crypto_attached_size = crypto_hash_digestsize(hash); 48 49 if (!ctl->hash_keysize) 50 return hash; 51 52 err = crypto_hash_setkey(hash, key, ctl->hash_keysize); 53 if (err) { 54 dprintk("%s: failed to set key for hash '%s', err: %d.\n", 55 __func__, ctl->hash_algo, err); 56 goto err_out_free; 57 } 58 59 return hash; 60 61err_out_free: 62 crypto_free_hash(hash); 63err_out_exit: 64 return ERR_PTR(err); 65} 66 67static struct crypto_ablkcipher *dst_init_cipher(struct dst_crypto_ctl *ctl, u8 *key) 68{ 69 int err = -EINVAL; 70 struct crypto_ablkcipher *cipher; 71 72 if (!ctl->cipher_keysize) 73 goto err_out_exit; 74 75 cipher = crypto_alloc_ablkcipher(ctl->cipher_algo, 0, 0); 76 if (IS_ERR(cipher)) { 77 err = PTR_ERR(cipher); 78 dprintk("%s: failed to allocate cipher '%s', err: %d.\n", 79 __func__, ctl->cipher_algo, err); 80 goto err_out_exit; 81 } 82 83 crypto_ablkcipher_clear_flags(cipher, ~0); 84 85 err = crypto_ablkcipher_setkey(cipher, key, ctl->cipher_keysize); 86 if (err) { 87 dprintk("%s: failed to set key for cipher '%s', err: %d.\n", 88 __func__, ctl->cipher_algo, err); 89 goto err_out_free; 90 } 91 92 return cipher; 93 94err_out_free: 95 crypto_free_ablkcipher(cipher); 96err_out_exit: 97 return ERR_PTR(err); 98} 99 100/* 101 * Crypto engine has a pool of pages to encrypt data into before sending 102 * it over the network. This pool is freed/allocated here. 
103 */ 104static void dst_crypto_pages_free(struct dst_crypto_engine *e) 105{ 106 unsigned int i; 107 108 for (i=0; i<e->page_num; ++i) 109 __free_page(e->pages[i]); 110 kfree(e->pages); 111} 112 113static int dst_crypto_pages_alloc(struct dst_crypto_engine *e, int num) 114{ 115 int i; 116 117 e->pages = kmalloc(num * sizeof(struct page **), GFP_KERNEL); 118 if (!e->pages) 119 return -ENOMEM; 120 121 for (i=0; i<num; ++i) { 122 e->pages[i] = alloc_page(GFP_KERNEL); 123 if (!e->pages[i]) 124 goto err_out_free_pages; 125 } 126 127 e->page_num = num; 128 return 0; 129 130err_out_free_pages: 131 while (--i >= 0) 132 __free_page(e->pages[i]); 133 134 kfree(e->pages); 135 return -ENOMEM; 136} 137 138/* 139 * Initialize crypto engine for given node. 140 * Setup cipher/hash, keys, pool of threads and private data. 141 */ 142static int dst_crypto_engine_init(struct dst_crypto_engine *e, struct dst_node *n) 143{ 144 int err; 145 struct dst_crypto_ctl *ctl = &n->crypto; 146 147 err = dst_crypto_pages_alloc(e, n->max_pages); 148 if (err) 149 goto err_out_exit; 150 151 e->size = PAGE_SIZE; 152 e->data = kmalloc(e->size, GFP_KERNEL); 153 if (!e->data) { 154 err = -ENOMEM; 155 goto err_out_free_pages; 156 } 157 158 if (ctl->hash_algo[0]) { 159 e->hash = dst_init_hash(ctl, n->hash_key); 160 if (IS_ERR(e->hash)) { 161 err = PTR_ERR(e->hash); 162 e->hash = NULL; 163 goto err_out_free; 164 } 165 } 166 167 if (ctl->cipher_algo[0]) { 168 e->cipher = dst_init_cipher(ctl, n->cipher_key); 169 if (IS_ERR(e->cipher)) { 170 err = PTR_ERR(e->cipher); 171 e->cipher = NULL; 172 goto err_out_free_hash; 173 } 174 } 175 176 return 0; 177 178err_out_free_hash: 179 crypto_free_hash(e->hash); 180err_out_free: 181 kfree(e->data); 182err_out_free_pages: 183 dst_crypto_pages_free(e); 184err_out_exit: 185 return err; 186} 187 188static void dst_crypto_engine_exit(struct dst_crypto_engine *e) 189{ 190 if (e->hash) 191 crypto_free_hash(e->hash); 192 if (e->cipher) 193 crypto_free_ablkcipher(e->cipher); 194 
dst_crypto_pages_free(e); 195 kfree(e->data); 196} 197 198/* 199 * Waiting for cipher processing to be completed. 200 */ 201struct dst_crypto_completion 202{ 203 struct completion complete; 204 int error; 205}; 206 207static void dst_crypto_complete(struct crypto_async_request *req, int err) 208{ 209 struct dst_crypto_completion *c = req->data; 210 211 if (err == -EINPROGRESS) 212 return; 213 214 dprintk("%s: req: %p, err: %d.\n", __func__, req, err); 215 c->error = err; 216 complete(&c->complete); 217} 218 219static int dst_crypto_process(struct ablkcipher_request *req, 220 struct scatterlist *sg_dst, struct scatterlist *sg_src, 221 void *iv, int enc, unsigned long timeout) 222{ 223 struct dst_crypto_completion c; 224 int err; 225 226 init_completion(&c.complete); 227 c.error = -EINPROGRESS; 228 229 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 230 dst_crypto_complete, &c); 231 232 ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv); 233 234 if (enc) 235 err = crypto_ablkcipher_encrypt(req); 236 else 237 err = crypto_ablkcipher_decrypt(req); 238 239 switch (err) { 240 case -EINPROGRESS: 241 case -EBUSY: 242 err = wait_for_completion_interruptible_timeout(&c.complete, 243 timeout); 244 if (!err) 245 err = -ETIMEDOUT; 246 else 247 err = c.error; 248 break; 249 default: 250 break; 251 } 252 253 return err; 254} 255 256/* 257 * DST uses generic iteration approach for data crypto processing. 258 * Single block IO request is switched into array of scatterlists, 259 * which are submitted to the crypto processing iterator. 260 * 261 * Input and output iterator initialization are different, since 262 * in output case we can not encrypt data in-place and need a 263 * temporary storage, which is then being sent to the remote peer. 
/*
 * Output iterator: source scatterlist points at the bio's own pages,
 * destination points at the engine's scratch pool, so the encrypted
 * copy can be sent while the original data stays intact.
 * @iterator is invoked once per bio segment.
 */
static int dst_trans_iter_out(struct bio *bio, struct dst_crypto_engine *e,
		int (* iterator) (struct dst_crypto_engine *e,
				  struct scatterlist *dst,
				  struct scatterlist *src))
{
	struct bio_vec *bv;
	int err, i;

	sg_init_table(e->src, bio->bi_vcnt);
	sg_init_table(e->dst, bio->bi_vcnt);

	bio_for_each_segment(bv, bio, i) {
		sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
		/* Destination is the engine's scratch page, same offset/len. */
		sg_set_page(&e->dst[i], e->pages[i], bv->bv_len, bv->bv_offset);

		err = iterator(e, &e->dst[i], &e->src[i]);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Input iterator: received data can be processed in-place, so both
 * scatterlists point at the bio's own pages.
 */
static int dst_trans_iter_in(struct bio *bio, struct dst_crypto_engine *e,
		int (* iterator) (struct dst_crypto_engine *e,
				  struct scatterlist *dst,
				  struct scatterlist *src))
{
	struct bio_vec *bv;
	int err, i;

	sg_init_table(e->src, bio->bi_vcnt);
	sg_init_table(e->dst, bio->bi_vcnt);

	bio_for_each_segment(bv, bio, i) {
		sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
		sg_set_page(&e->dst[i], bv->bv_page, bv->bv_len, bv->bv_offset);

		err = iterator(e, &e->dst[i], &e->src[i]);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Per-segment cipher step: build a zero-padded IV from the engine's
 * 64-bit IV value and run one encrypt/decrypt request.
 */
static int dst_crypt_iterator(struct dst_crypto_engine *e,
		struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
	struct ablkcipher_request *req = e->data;
	u8 iv[32];

	memset(iv, 0, sizeof(iv));

	memcpy(iv, &e->iv, sizeof(e->iv));

	return dst_crypto_process(req, sg_dst, sg_src, iv, e->enc, e->timeout);
}

/*
 * Encrypt (e->enc) or decrypt a whole bio.  The cipher request lives
 * in the engine's scratch buffer e->data.
 */
static int dst_crypt(struct dst_crypto_engine *e, struct bio *bio)
{
	struct ablkcipher_request *req = e->data;

	memset(req, 0, sizeof(struct ablkcipher_request));
	ablkcipher_request_set_tfm(req, e->cipher);

	/* Encryption needs scratch pages; decryption works in-place. */
	if (e->enc)
		return dst_trans_iter_out(bio, e, dst_crypt_iterator);
	else
		return dst_trans_iter_in(bio, e, dst_crypt_iterator);
}

/* Per-segment hash step: fold one scatterlist entry into the digest. */
static int dst_hash_iterator(struct dst_crypto_engine *e,
		struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
	return crypto_hash_update(e->data, sg_src, sg_src->length);
}

/*
 * Hash a whole bio; the final digest is written to @dst.  The
 * hash_desc lives in the engine's scratch buffer e->data.
 */
static int dst_hash(struct dst_crypto_engine *e, struct bio *bio, void *dst)
{
	struct hash_desc *desc = e->data;
	int err;

	desc->tfm = e->hash;
	desc->flags = 0;

	err = crypto_hash_init(desc);
	if (err)
		return err;

	err = dst_trans_iter_in(bio, e, dst_hash_iterator);
	if (err)
		return err;

	err = crypto_hash_final(desc, dst);
	if (err)
		return err;

	return 0;
}

/*
 * Initialize/cleanup a crypto thread. The only thing it should
 * do is to allocate a pool of pages as temporary storage.
 * And to setup cipher and/or hash.
 */
static void *dst_crypto_thread_init(void *data)
{
	struct dst_node *n = data;
	struct dst_crypto_engine *e;
	int err = -ENOMEM;

	e = kzalloc(sizeof(struct dst_crypto_engine), GFP_KERNEL);
	if (!e)
		goto err_out_exit;
	/* One allocation backs both scatterlist arrays: src, then dst. */
	e->src = kcalloc(2 * n->max_pages, sizeof(struct scatterlist),
			GFP_KERNEL);
	if (!e->src)
		goto err_out_free;

	e->dst = e->src + n->max_pages;

	err = dst_crypto_engine_init(e, n);
	if (err)
		goto err_out_free_all;

	return e;

err_out_free_all:
	kfree(e->src);
err_out_free:
	kfree(e);
err_out_exit:
	return ERR_PTR(err);
}

/* Thread-pool teardown counterpart of dst_crypto_thread_init(). */
static void dst_crypto_thread_cleanup(void *private)
{
	struct dst_crypto_engine *e = private;

	dst_crypto_engine_exit(e);
	kfree(e->src);
	kfree(e);
}

/*
 * Initialize crypto engine for given node: store keys, create pool
 * of threads, initialize each one.
 *
 * Each thread has unique ID, but 0 and 1 are reserved for receiving and accepting
 * threads (if export node), so IDs could start from 2, but starting them
 * from 10 allows easily understand what this thread is for.
 */
/*
 * Copy the crypto control block and the keys that follow it into the
 * node, then spawn ctl->thread_num crypto worker threads.
 *
 * @ctl: control header; the key material is appended directly after it.
 *
 * NOTE(review): both keys are copied from the same offset (ctl + 1) —
 * when both hash and cipher keys are configured they overlap.  Confirm
 * the intended layout of the key payload against the sender.
 */
int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl)
{
	void *key = (ctl + 1);
	int err = -ENOMEM, i;
	char name[32];

	if (ctl->hash_keysize) {
		n->hash_key = kmalloc(ctl->hash_keysize, GFP_KERNEL);
		if (!n->hash_key)
			goto err_out_exit;
		memcpy(n->hash_key, key, ctl->hash_keysize);
	}

	if (ctl->cipher_keysize) {
		n->cipher_key = kmalloc(ctl->cipher_keysize, GFP_KERNEL);
		if (!n->cipher_key)
			goto err_out_free_hash;
		memcpy(n->cipher_key, key, ctl->cipher_keysize);
	}
	memcpy(&n->crypto, ctl, sizeof(struct dst_crypto_ctl));

	for (i=0; i<ctl->thread_num; ++i) {
		snprintf(name, sizeof(name), "%s-crypto-%d", n->name, i);
		/* Unique ids... */
		err = thread_pool_add_worker(n->pool, name, i+10,
				dst_crypto_thread_init, dst_crypto_thread_cleanup, n);
		if (err)
			goto err_out_free_threads;
	}

	return 0;

err_out_free_threads:
	while (--i >= 0)
		thread_pool_del_worker_id(n->pool, i+10);

	/*
	 * Key sizes are zeroed in the caller's ctl so later teardown does
	 * not try to use the freed keys.
	 */
	if (ctl->cipher_keysize)
		kfree(n->cipher_key);
	ctl->cipher_keysize = 0;
err_out_free_hash:
	if (ctl->hash_keysize)
		kfree(n->hash_key);
	ctl->hash_keysize = 0;
err_out_exit:
	return err;
}

/*
 * Free the node's key copies; only needed when some crypto algorithm
 * was configured (kfree(NULL) is a no-op for the key never allocated).
 */
void dst_node_crypto_exit(struct dst_node *n)
{
	struct dst_crypto_ctl *ctl = &n->crypto;

	if (ctl->cipher_algo[0] || ctl->hash_algo[0]) {
		kfree(n->hash_key);
		kfree(n->cipher_key);
	}
}

/*
 * Thread pool setup callback. Just stores a transaction in private data.
 */
477 */ 478static int dst_trans_crypto_setup(void *crypto_engine, void *trans) 479{ 480 struct dst_crypto_engine *e = crypto_engine; 481 482 e->private = trans; 483 return 0; 484} 485 486#if 0 487static void dst_dump_bio(struct bio *bio) 488{ 489 u8 *p; 490 struct bio_vec *bv; 491 int i; 492 493 bio_for_each_segment(bv, bio, i) { 494 dprintk("%s: %llu/%u: size: %u, offset: %u, data: ", 495 __func__, bio->bi_sector, bio->bi_size, 496 bv->bv_len, bv->bv_offset); 497 498 p = kmap(bv->bv_page) + bv->bv_offset; 499 for (i=0; i<bv->bv_len; ++i) 500 printk("%02x ", p[i]); 501 kunmap(bv->bv_page); 502 printk("\n"); 503 } 504} 505#endif 506 507/* 508 * Encrypt/hash data and send it to the network. 509 */ 510static int dst_crypto_process_sending(struct dst_crypto_engine *e, 511 struct bio *bio, u8 *hash) 512{ 513 int err; 514 515 if (e->cipher) { 516 err = dst_crypt(e, bio); 517 if (err) 518 goto err_out_exit; 519 } 520 521 if (e->hash) { 522 err = dst_hash(e, bio, hash); 523 if (err) 524 goto err_out_exit; 525 526#ifdef CONFIG_DST_DEBUG 527 { 528 unsigned int i; 529 530 /* dst_dump_bio(bio); */ 531 532 printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash: ", 533 __func__, (u64)bio->bi_sector, 534 bio->bi_size, bio_data_dir(bio)); 535 for (i=0; i<crypto_hash_digestsize(e->hash); ++i) 536 printk("%02x ", hash[i]); 537 printk("\n"); 538 } 539#endif 540 } 541 542 return 0; 543 544err_out_exit: 545 return err; 546} 547 548/* 549 * Check if received data is valid. Decipher if it is. 
/*
 * Verify the digest of received data against @recv_hash (computing the
 * local digest into @hash), then decrypt the bio in place.
 *
 * Returns 0 on success, a negative error from hashing/decryption, or
 * -1 on digest mismatch.
 */
static int dst_crypto_process_receiving(struct dst_crypto_engine *e,
		struct bio *bio, u8 *hash, u8 *recv_hash)
{
	int err;

	if (e->hash) {
		int mismatch;

		err = dst_hash(e, bio, hash);
		if (err)
			goto err_out_exit;

		mismatch = !!memcmp(recv_hash, hash,
				crypto_hash_digestsize(e->hash));
#ifdef CONFIG_DST_DEBUG
		/* dst_dump_bio(bio); */

		printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash mismatch: %d",
			__func__, (u64)bio->bi_sector, bio->bi_size,
			bio_data_dir(bio), mismatch);
		if (mismatch) {
			unsigned int i;

			printk(", recv/calc: ");
			for (i=0; i<crypto_hash_digestsize(e->hash); ++i) {
				printk("%02x/%02x ", recv_hash[i], hash[i]);
			}
		}
		printk("\n");
#endif
		/* err is only consumed when the digests differ. */
		err = -1;
		if (mismatch)
			goto err_out_exit;
	}

	/* Data is authentic (or unhashed) — safe to decrypt in place. */
	if (e->cipher) {
		err = dst_crypt(e, bio);
		if (err)
			goto err_out_exit;
	}

	return 0;

err_out_exit:
	return err;
}

/*
 * Thread pool callback to encrypt data and send it to the network.
 */
/*
 * Thread-pool work callback for a local transaction.
 *
 * WRITE: encrypt/hash the bio and send it to the remote node,
 * accounting for the digest appended to the command.
 * READ:  verify/decrypt the received data, then complete and drop the
 * transaction.
 *
 * On error the transaction is marked failed and its reference dropped.
 */
static int dst_trans_crypto_action(void *crypto_engine, void *schedule_data)
{
	struct dst_crypto_engine *e = crypto_engine;
	struct dst_trans *t = schedule_data;
	struct bio *bio = t->bio;
	int err;

	dprintk("%s: t: %p, gen: %llu, cipher: %p, hash: %p.\n",
			__func__, t, t->gen, e->cipher, e->hash);

	e->enc = t->enc;
	e->iv = dst_gen_iv(t);

	if (bio_data_dir(bio) == WRITE) {
		err = dst_crypto_process_sending(e, bio, t->cmd.hash);
		if (err)
			goto err_out_exit;

		if (e->hash) {
			t->cmd.csize = crypto_hash_digestsize(e->hash);
			t->cmd.size += t->cmd.csize;
		}

		return dst_trans_send(t);
	} else {
		/* Scratch digest buffer in the second half of e->data. */
		u8 *hash = e->data + e->size/2;

		err = dst_crypto_process_receiving(e, bio, hash, t->cmd.hash);
		if (err)
			goto err_out_exit;

		dst_trans_remove(t);
		dst_trans_put(t);
	}

	return 0;

err_out_exit:
	t->error = err;
	dst_trans_put(t);
	return err;
}

/*
 * Schedule crypto processing for given transaction.
 */
int dst_trans_crypto(struct dst_trans *t)
{
	struct dst_node *n = t->n;
	int err;

	err = thread_pool_schedule(n->pool,
		dst_trans_crypto_setup, dst_trans_crypto_action,
		t, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_out_exit;

	return 0;

err_out_exit:
	/* Scheduling failed — drop the caller's transaction reference. */
	dst_trans_put(t);
	return err;
}

/*
 * Crypto machinery for the export node.
 */
/* Thread-pool setup callback for export nodes: stash the bio. */
static int dst_export_crypto_setup(void *crypto_engine, void *bio)
{
	struct dst_crypto_engine *e = crypto_engine;

	e->private = bio;
	return 0;
}

/*
 * Thread-pool work callback on the export (server) side.
 *
 * WRITE (data received from the client): verify/decrypt, then submit
 * the bio to the local block device.
 * READ (data to send to the client): encrypt/hash and transmit.
 *
 * The IV here is the remote command id rather than a transaction
 * generation number.
 */
static int dst_export_crypto_action(void *crypto_engine, void *schedule_data)
{
	struct dst_crypto_engine *e = crypto_engine;
	struct bio *bio = schedule_data;
	struct dst_export_priv *p = bio->bi_private;
	int err;

	dprintk("%s: e: %p, data: %p, bio: %llu/%u, dir: %lu.\n", __func__,
		e, e->data, (u64)bio->bi_sector, bio->bi_size, bio_data_dir(bio));

	e->enc = (bio_data_dir(bio) == READ);
	e->iv = p->cmd.id;

	if (bio_data_dir(bio) == WRITE) {
		/* Scratch digest buffer in the second half of e->data. */
		u8 *hash = e->data + e->size/2;

		err = dst_crypto_process_receiving(e, bio, hash, p->cmd.hash);
		if (err)
			goto err_out_exit;

		generic_make_request(bio);
	} else {
		err = dst_crypto_process_sending(e, bio, p->cmd.hash);
		if (err)
			goto err_out_exit;

		if (e->hash) {
			p->cmd.csize = crypto_hash_digestsize(e->hash);
			p->cmd.size += p->cmd.csize;
		}

		/*
		 * NOTE(review): the return of dst_export_send_bio() is
		 * assigned but the function still returns 0 — presumably
		 * dst_export_send_bio() handles its own failure/bio_put;
		 * confirm before "fixing" this to propagate err.
		 */
		err = dst_export_send_bio(bio);
	}
	return 0;

err_out_exit:
	bio_put(bio);
	return err;
}

/*
 * Schedule crypto processing of @bio on the export node's thread pool.
 * On scheduling failure the bio reference is dropped here.
 */
int dst_export_crypto(struct dst_node *n, struct bio *bio)
{
	int err;

	err = thread_pool_schedule(n->pool,
		dst_export_crypto_setup, dst_export_crypto_action,
		bio, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_out_exit;

	return 0;

err_out_exit:
	bio_put(bio);
	return err;
}