Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

+1020 -745
+18 -20
arch/i386/crypto/aes-i586-asm.S
···
	xor 8(%ebp),%r4
	xor 12(%ebp),%r5

-	sub $8,%esp		// space for register saves on stack
-	add $16,%ebp		// increment to next round key
-	sub $10,%r3
-	je  4f			// 10 rounds for 128-bit key
-	add $32,%ebp
-	sub $2,%r3
-	je  3f			// 12 rounds for 128-bit key
-	add $32,%ebp
+	sub $8,%esp		// space for register saves on stack
+	add $16,%ebp		// increment to next round key
+	cmp $12,%r3
+	jb  4f			// 10 rounds for 128-bit key
+	lea 32(%ebp),%ebp
+	je  3f			// 12 rounds for 192-bit key
+	lea 32(%ebp),%ebp

-2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 128-bit key
+2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 256-bit key
	fwd_rnd2( -48(%ebp) ,ft_tab)
-3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 128-bit key
+3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 192-bit key
	fwd_rnd2( -16(%ebp) ,ft_tab)
4:	fwd_rnd1( (%ebp) ,ft_tab)	// 10 rounds for 128-bit key
	fwd_rnd2( +16(%ebp) ,ft_tab)
···
	xor 8(%ebp),%r4
	xor 12(%ebp),%r5

-	sub $8,%esp		// space for register saves on stack
-	sub $16,%ebp		// increment to next round key
-	sub $10,%r3
-	je  4f			// 10 rounds for 128-bit key
-	sub $32,%ebp
-	sub $2,%r3
-	je  3f			// 12 rounds for 128-bit key
-	sub $32,%ebp
+	sub $8,%esp		// space for register saves on stack
+	sub $16,%ebp		// increment to next round key
+	cmp $12,%r3
+	jb  4f			// 10 rounds for 128-bit key
+	lea -32(%ebp),%ebp
+	je  3f			// 12 rounds for 192-bit key
+	lea -32(%ebp),%ebp

-2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 128-bit key
+2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 256-bit key
	inv_rnd2( +48(%ebp), it_tab)
-3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 128-bit key
+3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 192-bit key
	inv_rnd2( +16(%ebp), it_tab)
4:	inv_rnd1( (%ebp), it_tab)	// 10 rounds for 128-bit key
	inv_rnd2( -16(%ebp), it_tab)
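The dispatch above is the interesting part of this hunk: the old code consumed the round count with sub instructions, while the new code performs a single cmp $12,%r3 and then bumps the round-key pointer with lea, which, unlike add/sub, leaves EFLAGS untouched, so both jb and je can test the one comparison. It also corrects the copy-pasted comments: 12 and 14 rounds belong to 192- and 256-bit keys. A rough C rendering of the new control flow (a sketch only; the helper name is illustrative and the assumption is that %r3 holds the round count 10/12/14):

	/* Mirrors the cmp/jb/lea/je sequence: one comparison drives both
	 * branches because lea does not clobber the flags in between. */
	static const u32 *skip_to_final_rounds(const u32 *rk, int rounds)
	{
		rk += 4;		/* add $16,%ebp: advance to round key 1 */
		if (rounds < 12)	/* cmp $12 / jb 4f */
			return rk;	/* 10 rounds, 128-bit key */
		rk += 8;		/* lea 32(%ebp),%ebp: flags preserved */
		if (rounds == 12)	/* je 3f, still testing the same cmp */
			return rk;	/* 12 rounds, 192-bit key */
		return rk + 8;		/* 14 rounds, 256-bit key */
	}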
+25 -31
arch/i386/crypto/aes.c
···
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 */
+
+#include <asm/byteorder.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
···
 };

 #define WPOLY 0x011b
-#define u32_in(x) le32_to_cpup((const __le32 *)(x))
 #define bytes2word(b0, b1, b2, b3) \
	(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
···

 u32 ft_tab[4][256];
 u32 fl_tab[4][256];
-static u32 ls_tab[4][256];
 static u32 im_tab[4][256];
 u32 il_tab[4][256];
 u32 it_tab[4][256];
···
	fl_tab[1][i] = upr(w, 1);
	fl_tab[2][i] = upr(w, 2);
	fl_tab[3][i] = upr(w, 3);
-
-	/*
-	 * table for key schedule if fl_tab above is
-	 * not of the required form
-	 */
-	ls_tab[0][i] = w;
-	ls_tab[1][i] = upr(w, 1);
-	ls_tab[2][i] = upr(w, 2);
-	ls_tab[3][i] = upr(w, 3);

	b = fi(inv_affine((u8)i));
	w = bytes2word(fe(b), f9(b), fd(b), fb(b));
···
	int i;
	u32 ss[8];
	struct aes_ctx *ctx = ctx_arg;
+	const __le32 *key = (const __le32 *)in_key;

	/* encryption schedule */

-	ctx->ekey[0] = ss[0] = u32_in(in_key);
-	ctx->ekey[1] = ss[1] = u32_in(in_key + 4);
-	ctx->ekey[2] = ss[2] = u32_in(in_key + 8);
-	ctx->ekey[3] = ss[3] = u32_in(in_key + 12);
+	ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
+	ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
+	ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
+	ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);

	switch(key_len) {
	case 16:
···
		break;

	case 24:
-		ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
-		ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
+		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
		for (i = 0; i < 7; i++)
			ke6(ctx->ekey, i);
		kel6(ctx->ekey, 7);
···
		break;

	case 32:
-		ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
-		ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
-		ctx->ekey[6] = ss[6] = u32_in(in_key + 24);
-		ctx->ekey[7] = ss[7] = u32_in(in_key + 28);
+		ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+		ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
+		ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
+		ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
		for (i = 0; i < 6; i++)
			ke8(ctx->ekey, i);
		kel8(ctx->ekey, 6);
···

	/* decryption schedule */

-	ctx->dkey[0] = ss[0] = u32_in(in_key);
-	ctx->dkey[1] = ss[1] = u32_in(in_key + 4);
-	ctx->dkey[2] = ss[2] = u32_in(in_key + 8);
-	ctx->dkey[3] = ss[3] = u32_in(in_key + 12);
+	ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
+	ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
+	ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
+	ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);

	switch (key_len) {
	case 16:
···
		break;

	case 24:
-		ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
-		ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
+		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
		kdf6(ctx->dkey, 0);
		for (i = 1; i < 7; i++)
			kd6(ctx->dkey, i);
···
		break;

	case 32:
-		ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
-		ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
-		ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24));
-		ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28));
+		ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+		ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
+		ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
+		ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
		kdf8(ctx->dkey, 0);
		for (i = 1; i < 6; i++)
			kd8(ctx->dkey, i);
···

 static struct crypto_alg aes_alg = {
	.cra_name = "aes",
+	.cra_driver_name = "aes-i586",
+	.cra_priority = 200,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aes_ctx),
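Every key-loading path in this merge now shares one pattern: the caller's byte buffer is viewed as an array of __le32 (or __be32/__be64) words, and le32_to_cpu() supplies any byte swap, which compiles to nothing on a little-endian CPU while keeping sparse's endianness checking happy. A minimal sketch of the idiom (the helper is illustrative, not part of the patch; the cast assumes the 4-byte alignment that the alignmask machinery below guarantees):

	static void load_key_words(u32 *out, const u8 *in_key, unsigned int words)
	{
		const __le32 *key = (const __le32 *)in_key;
		unsigned int i;

		for (i = 0; i < words; i++)
			out[i] = le32_to_cpu(key[i]);	/* no-op swap on LE CPUs */
	}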
+13 -12
arch/x86_64/crypto/aes.c
···
	return x >> (n << 3);
 }

-#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
-
 struct aes_ctx
 {
	u32 key_length;
···
		       u32 *flags)
 {
	struct aes_ctx *ctx = ctx_arg;
+	const __le32 *key = (const __le32 *)in_key;
	u32 i, j, t, u, v, w;

	if (key_len != 16 && key_len != 24 && key_len != 32) {
···

	ctx->key_length = key_len;

-	D_KEY[key_len + 24] = E_KEY[0] = u32_in(in_key);
-	D_KEY[key_len + 25] = E_KEY[1] = u32_in(in_key + 4);
-	D_KEY[key_len + 26] = E_KEY[2] = u32_in(in_key + 8);
-	D_KEY[key_len + 27] = E_KEY[3] = u32_in(in_key + 12);
+	D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]);
+	D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]);
+	D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]);
+	D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]);

	switch (key_len) {
	case 16:
···
		break;

	case 24:
-		E_KEY[4] = u32_in(in_key + 16);
-		t = E_KEY[5] = u32_in(in_key + 20);
+		E_KEY[4] = le32_to_cpu(key[4]);
+		t = E_KEY[5] = le32_to_cpu(key[5]);
		for (i = 0; i < 8; ++i)
			loop6 (i);
		break;

	case 32:
-		E_KEY[4] = u32_in(in_key + 16);
-		E_KEY[5] = u32_in(in_key + 20);
-		E_KEY[6] = u32_in(in_key + 24);
-		t = E_KEY[7] = u32_in(in_key + 28);
+		E_KEY[4] = le32_to_cpu(key[4]);
+		E_KEY[5] = le32_to_cpu(key[5]);
+		E_KEY[6] = le32_to_cpu(key[6]);
+		t = E_KEY[7] = le32_to_cpu(key[7]);
		for (i = 0; i < 7; ++i)
			loop8(i);
		break;
···

 static struct crypto_alg aes_alg = {
	.cra_name = "aes",
+	.cra_driver_name = "aes-x86_64",
+	.cra_priority = 200,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aes_ctx),
+1 -1
crypto/Kconfig
···

 config CRYPTO_AES
	tristate "AES cipher algorithms"
-	depends on CRYPTO && !(X86 || UML_X86)
+	depends on CRYPTO
	help
	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
	  algorithm.
+34 -29
crypto/aes.c
··· 73 73 return x >> (n << 3); 74 74 } 75 75 76 - #define u32_in(x) le32_to_cpu(*(const u32 *)(x)) 77 - #define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from)) 78 - 79 76 struct aes_ctx { 80 77 int key_length; 81 78 u32 E[60]; ··· 253 256 aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) 254 257 { 255 258 struct aes_ctx *ctx = ctx_arg; 259 + const __le32 *key = (const __le32 *)in_key; 256 260 u32 i, t, u, v, w; 257 261 258 262 if (key_len != 16 && key_len != 24 && key_len != 32) { ··· 263 265 264 266 ctx->key_length = key_len; 265 267 266 - E_KEY[0] = u32_in (in_key); 267 - E_KEY[1] = u32_in (in_key + 4); 268 - E_KEY[2] = u32_in (in_key + 8); 269 - E_KEY[3] = u32_in (in_key + 12); 268 + E_KEY[0] = le32_to_cpu(key[0]); 269 + E_KEY[1] = le32_to_cpu(key[1]); 270 + E_KEY[2] = le32_to_cpu(key[2]); 271 + E_KEY[3] = le32_to_cpu(key[3]); 270 272 271 273 switch (key_len) { 272 274 case 16: ··· 276 278 break; 277 279 278 280 case 24: 279 - E_KEY[4] = u32_in (in_key + 16); 280 - t = E_KEY[5] = u32_in (in_key + 20); 281 + E_KEY[4] = le32_to_cpu(key[4]); 282 + t = E_KEY[5] = le32_to_cpu(key[5]); 281 283 for (i = 0; i < 8; ++i) 282 284 loop6 (i); 283 285 break; 284 286 285 287 case 32: 286 - E_KEY[4] = u32_in (in_key + 16); 287 - E_KEY[5] = u32_in (in_key + 20); 288 - E_KEY[6] = u32_in (in_key + 24); 289 - t = E_KEY[7] = u32_in (in_key + 28); 288 + E_KEY[4] = le32_to_cpu(key[4]); 289 + E_KEY[5] = le32_to_cpu(key[5]); 290 + E_KEY[6] = le32_to_cpu(key[6]); 291 + t = E_KEY[7] = le32_to_cpu(key[7]); 290 292 for (i = 0; i < 7; ++i) 291 293 loop8 (i); 292 294 break; ··· 322 324 static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) 323 325 { 324 326 const struct aes_ctx *ctx = ctx_arg; 327 + const __le32 *src = (const __le32 *)in; 328 + __le32 *dst = (__le32 *)out; 325 329 u32 b0[4], b1[4]; 326 330 const u32 *kp = E_KEY + 4; 327 331 328 - b0[0] = u32_in (in) ^ E_KEY[0]; 329 - b0[1] = u32_in (in + 4) ^ E_KEY[1]; 330 - b0[2] = u32_in (in + 8) ^ E_KEY[2]; 331 - b0[3] = u32_in (in + 12) ^ E_KEY[3]; 332 + b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0]; 333 + b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1]; 334 + b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2]; 335 + b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3]; 332 336 333 337 if (ctx->key_length > 24) { 334 338 f_nround (b1, b0, kp); ··· 353 353 f_nround (b1, b0, kp); 354 354 f_lround (b0, b1, kp); 355 355 356 - u32_out (out, b0[0]); 357 - u32_out (out + 4, b0[1]); 358 - u32_out (out + 8, b0[2]); 359 - u32_out (out + 12, b0[3]); 356 + dst[0] = cpu_to_le32(b0[0]); 357 + dst[1] = cpu_to_le32(b0[1]); 358 + dst[2] = cpu_to_le32(b0[2]); 359 + dst[3] = cpu_to_le32(b0[3]); 360 360 } 361 361 362 362 /* decrypt a block of text */ ··· 377 377 static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) 378 378 { 379 379 const struct aes_ctx *ctx = ctx_arg; 380 + const __le32 *src = (const __le32 *)in; 381 + __le32 *dst = (__le32 *)out; 380 382 u32 b0[4], b1[4]; 381 383 const int key_len = ctx->key_length; 382 384 const u32 *kp = D_KEY + key_len + 20; 383 385 384 - b0[0] = u32_in (in) ^ E_KEY[key_len + 24]; 385 - b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25]; 386 - b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26]; 387 - b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27]; 386 + b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24]; 387 + b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25]; 388 + b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26]; 389 + b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27]; 388 390 389 391 if (key_len > 24) { 390 392 i_nround (b1, b0, kp); ··· 409 
407 i_nround (b1, b0, kp); 410 408 i_lround (b0, b1, kp); 411 409 412 - u32_out (out, b0[0]); 413 - u32_out (out + 4, b0[1]); 414 - u32_out (out + 8, b0[2]); 415 - u32_out (out + 12, b0[3]); 410 + dst[0] = cpu_to_le32(b0[0]); 411 + dst[1] = cpu_to_le32(b0[1]); 412 + dst[2] = cpu_to_le32(b0[2]); 413 + dst[3] = cpu_to_le32(b0[3]); 416 414 } 417 415 418 416 419 417 static struct crypto_alg aes_alg = { 420 418 .cra_name = "aes", 419 + .cra_driver_name = "aes-generic", 420 + .cra_priority = 100, 421 421 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 422 422 .cra_blocksize = AES_BLOCK_SIZE, 423 423 .cra_ctxsize = sizeof(struct aes_ctx), 424 + .cra_alignmask = 3, 424 425 .cra_module = THIS_MODULE, 425 426 .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), 426 427 .cra_u = {
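The new .cra_alignmask = 3 line is what makes the (__le32 *) casts above legal: it tells the API that this cipher dereferences its input, output and key buffers as 32-bit words, so the buffers handed to it must be 4-byte aligned. An alignmask must be one less than a power of two; crypto_register_alg() in this merge rejects anything else with a check equivalent to this sketch:

	/* valid masks: 0, 1, 3, 7, ... (i.e. 2^n - 1) */
	static int alignmask_valid(unsigned long alignmask)
	{
		return (alignmask & (alignmask + 1)) == 0;
	}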
+14 -25
crypto/anubis.c
···
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <asm/byteorder.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #define ANUBIS_MIN_KEY_SIZE	16
 #define ANUBIS_MAX_KEY_SIZE	40
···
 static int anubis_setkey(void *ctx_arg, const u8 *in_key,
			  unsigned int key_len, u32 *flags)
 {
-
-	int N, R, i, pos, r;
+	const __be32 *key = (const __be32 *)in_key;
+	int N, R, i, r;
	u32 kappa[ANUBIS_MAX_N];
	u32 inter[ANUBIS_MAX_N];
···
	ctx->R = R = 8 + N;

	/*
	 * map cipher key to initial key state (mu):
	 */
-	for (i = 0, pos = 0; i < N; i++, pos += 4) {
-		kappa[i] =
-			(in_key[pos    ] << 24) ^
-			(in_key[pos + 1] << 16) ^
-			(in_key[pos + 2] <<  8) ^
-			(in_key[pos + 3]      );
-	}
+	for (i = 0; i < N; i++)
+		kappa[i] = be32_to_cpu(key[i]);

	/*
	 * generate R + 1 round keys:
···
 static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
		u8 *ciphertext, const u8 *plaintext, const int R)
 {
-	int i, pos, r;
+	const __be32 *src = (const __be32 *)plaintext;
+	__be32 *dst = (__be32 *)ciphertext;
+	int i, r;
	u32 state[4];
	u32 inter[4];
···
	 * map plaintext block to cipher state (mu)
	 * and add initial round key (sigma[K^0]):
	 */
-	for (i = 0, pos = 0; i < 4; i++, pos += 4) {
-		state[i] =
-			(plaintext[pos    ] << 24) ^
-			(plaintext[pos + 1] << 16) ^
-			(plaintext[pos + 2] <<  8) ^
-			(plaintext[pos + 3]      ) ^
-			roundKey[0][i];
-	}
+	for (i = 0; i < 4; i++)
+		state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];

	/*
	 * R - 1 full rounds:
···
	 * map cipher state to ciphertext block (mu^{-1}):
	 */

-	for (i = 0, pos = 0; i < 4; i++, pos += 4) {
-		u32 w = inter[i];
-		ciphertext[pos    ] = (u8)(w >> 24);
-		ciphertext[pos + 1] = (u8)(w >> 16);
-		ciphertext[pos + 2] = (u8)(w >>  8);
-		ciphertext[pos + 3] = (u8)(w      );
-	}
+	for (i = 0; i < 4; i++)
+		dst[i] = cpu_to_be32(inter[i]);
 }

 static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = ANUBIS_BLOCK_SIZE,
	.cra_ctxsize = sizeof (struct anubis_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(anubis_alg.cra_list),
	.cra_u = { .cipher = {
+47 -7
crypto/api.c
···
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
···
 #include <linux/init.h>
 #include <linux/crypto.h>
 #include <linux/errno.h>
+#include <linux/kernel.h>
 #include <linux/kmod.h>
 #include <linux/rwsem.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include "internal.h"

 LIST_HEAD(crypto_alg_list);
···
 static struct crypto_alg *crypto_alg_lookup(const char *name)
 {
	struct crypto_alg *q, *alg = NULL;
+	int best = -1;

	if (!name)
		return NULL;
···
	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
-		if (!(strcmp(q->cra_name, name))) {
-			if (crypto_alg_get(q))
-				alg = q;
+		int exact, fuzzy;
+
+		exact = !strcmp(q->cra_driver_name, name);
+		fuzzy = !strcmp(q->cra_name, name);
+		if (!exact && !(fuzzy && q->cra_priority > best))
+			continue;
+
+		if (unlikely(!crypto_alg_get(q)))
+			continue;
+
+		best = q->cra_priority;
+		if (alg)
+			crypto_alg_put(alg);
+		alg = q;
+
+		if (exact)
			break;
-		}
	}

	up_read(&crypto_alg_sem);
···
	kfree(tfm);
 }

+static inline int crypto_set_driver_name(struct crypto_alg *alg)
+{
+	static const char suffix[] = "-generic";
+	char *driver_name = (char *)alg->cra_driver_name;
+	int len;
+
+	if (*driver_name)
+		return 0;
+
+	len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+	if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
+
+	memcpy(driver_name + len, suffix, sizeof(suffix));
+	return 0;
+}
+
 int crypto_register_alg(struct crypto_alg *alg)
 {
-	int ret = 0;
+	int ret;
	struct crypto_alg *q;

	if (alg->cra_alignmask & (alg->cra_alignmask + 1))
···
	if (alg->cra_alignmask & alg->cra_blocksize)
		return -EINVAL;

-	if (alg->cra_blocksize > PAGE_SIZE)
+	if (alg->cra_blocksize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	if (alg->cra_priority < 0)
		return -EINVAL;

+	ret = crypto_set_driver_name(alg);
+	if (unlikely(ret))
+		return ret;
+
	down_write(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
-		if (!(strcmp(q->cra_name, alg->cra_name))) {
+		if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) {
			ret = -EEXIST;
			goto out;
		}
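The lookup now distinguishes an algorithm name ("aes") from a driver name ("aes-i586"): an exact cra_driver_name match wins immediately, otherwise the highest-priority registered implementation of that cra_name is chosen, and crypto_set_driver_name() gives legacy modules an implicit "<name>-generic" driver name. The selection rule, restated as a stand-alone sketch (struct alg and pick() are illustrative only, assuming <string.h>):

	struct alg { const char *name; const char *driver; int prio; };

	static const struct alg *pick(const struct alg *v, int n, const char *want)
	{
		const struct alg *found = NULL;
		int best = -1, i;

		for (i = 0; i < n; i++) {
			if (!strcmp(v[i].driver, want))
				return &v[i];		/* exact driver match wins */
			if (!strcmp(v[i].name, want) && v[i].prio > best) {
				best = v[i].prio;	/* best fuzzy match so far */
				found = &v[i];
			}
		}
		return found;
	}

With the priorities registered in this merge, asking for "aes" prefers aes-padlock (300) over aes-i586 or aes-x86_64 (200) over aes-generic (100), while asking for "aes-generic" pins the C implementation explicitly.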
+3
crypto/blowfish.c
···
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <asm/byteorder.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #define BF_BLOCK_SIZE 8
 #define BF_MIN_KEY_SIZE 4
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = BF_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct bf_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(alg.cra_list),
	.cra_u = { .cipher = {
+20 -27
crypto/cast5.c
···
 */


+#include <asm/byteorder.h>
 #include <linux/init.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/types.h>

 #define CAST5_BLOCK_SIZE 8
 #define CAST5_MIN_KEY_SIZE 5
···
 static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
 {
	struct cast5_ctx *c = (struct cast5_ctx *) ctx;
+	const __be32 *src = (const __be32 *)inbuf;
+	__be32 *dst = (__be32 *)outbuf;
	u32 l, r, t;
	u32 I;			/* used by the Fx macros */
	u32 *Km;
···
	/* (L0,R0) <-- (m1...m64).  (Split the plaintext into left and
	 * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.)
	 */
-	l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
-	r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
+	l = be32_to_cpu(src[0]);
+	r = be32_to_cpu(src[1]);

	/* (16 rounds) for i from 1 to 16, compute Li and Ri as follows:
	 *	Li = Ri-1;
···

	/* c1...c64 <-- (R16,L16).  (Exchange final blocks L16, R16 and
	 * concatenate to form the ciphertext.) */
-	outbuf[0] = (r >> 24) & 0xff;
-	outbuf[1] = (r >> 16) & 0xff;
-	outbuf[2] = (r >> 8) & 0xff;
-	outbuf[3] = r & 0xff;
-	outbuf[4] = (l >> 24) & 0xff;
-	outbuf[5] = (l >> 16) & 0xff;
-	outbuf[6] = (l >> 8) & 0xff;
-	outbuf[7] = l & 0xff;
+	dst[0] = cpu_to_be32(r);
+	dst[1] = cpu_to_be32(l);
 }

 static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
 {
	struct cast5_ctx *c = (struct cast5_ctx *) ctx;
+	const __be32 *src = (const __be32 *)inbuf;
+	__be32 *dst = (__be32 *)outbuf;
	u32 l, r, t;
	u32 I;
	u32 *Km;
···
	Km = c->Km;
	Kr = c->Kr;

-	l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
-	r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
+	l = be32_to_cpu(src[0]);
+	r = be32_to_cpu(src[1]);

	if (!(c->rr)) {
		t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
···
		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
	}

-	outbuf[0] = (r >> 24) & 0xff;
-	outbuf[1] = (r >> 16) & 0xff;
-	outbuf[2] = (r >> 8) & 0xff;
-	outbuf[3] = r & 0xff;
-	outbuf[4] = (l >> 24) & 0xff;
-	outbuf[5] = (l >> 16) & 0xff;
-	outbuf[6] = (l >> 8) & 0xff;
-	outbuf[7] = l & 0xff;
+	dst[0] = cpu_to_be32(r);
+	dst[1] = cpu_to_be32(l);
 }

 static void key_schedule(u32 * x, u32 * z, u32 * k)
···
	u32 x[4];
	u32 z[4];
	u32 k[16];
-	u8 p_key[16];
+	__be32 p_key[4];
	struct cast5_ctx *c = (struct cast5_ctx *) ctx;

	if (key_len < 5 || key_len > 16) {
···
	memcpy(p_key, key, key_len);


-	x[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3];
-	x[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7];
-	x[2] =
-	    p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11];
-	x[3] =
-	    p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15];
+	x[0] = be32_to_cpu(p_key[0]);
+	x[1] = be32_to_cpu(p_key[1]);
+	x[2] = be32_to_cpu(p_key[2]);
+	x[3] = be32_to_cpu(p_key[3]);

	key_schedule(x, z, k);
	for (i = 0; i < 16; i++)
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = CAST5_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast5_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(alg.cra_list),
	.cra_u = {
+33 -50
crypto/cast6.c
···
 */


+#include <asm/byteorder.h>
 #include <linux/init.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/types.h>

 #define CAST6_BLOCK_SIZE 16
 #define CAST6_MIN_KEY_SIZE 16
···
 {
	int i;
	u32 key[8];
-	u8 p_key[32]; /* padded key */
+	__be32 p_key[8]; /* padded key */
	struct cast6_ctx *c = (struct cast6_ctx *) ctx;

	if (key_len < 16 || key_len > 32 || key_len % 4 != 0) {
···
	memset (p_key, 0, 32);
	memcpy (p_key, in_key, key_len);

-	key[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3];		/* A */
-	key[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7];		/* B */
-	key[2] = p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11];		/* C */
-	key[3] = p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15];	/* D */
-	key[4] = p_key[16] << 24 | p_key[17] << 16 | p_key[18] << 8 | p_key[19];	/* E */
-	key[5] = p_key[20] << 24 | p_key[21] << 16 | p_key[22] << 8 | p_key[23];	/* F */
-	key[6] = p_key[24] << 24 | p_key[25] << 16 | p_key[26] << 8 | p_key[27];	/* G */
-	key[7] = p_key[28] << 24 | p_key[29] << 16 | p_key[30] << 8 | p_key[31];	/* H */
+	key[0] = be32_to_cpu(p_key[0]);		/* A */
+	key[1] = be32_to_cpu(p_key[1]);		/* B */
+	key[2] = be32_to_cpu(p_key[2]);		/* C */
+	key[3] = be32_to_cpu(p_key[3]);		/* D */
+	key[4] = be32_to_cpu(p_key[4]);		/* E */
+	key[5] = be32_to_cpu(p_key[5]);		/* F */
+	key[6] = be32_to_cpu(p_key[6]);		/* G */
+	key[7] = be32_to_cpu(p_key[7]);		/* H */


···

 static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
	struct cast6_ctx * c = (struct cast6_ctx *)ctx;
+	const __be32 *src = (const __be32 *)inbuf;
+	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	u32 * Km;
	u8 * Kr;

-	block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
-	block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
-	block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11];
-	block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15];
+	block[0] = be32_to_cpu(src[0]);
+	block[1] = be32_to_cpu(src[1]);
+	block[2] = be32_to_cpu(src[2]);
+	block[3] = be32_to_cpu(src[3]);

	Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km);
	Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km);
···
	Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km);
	Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km);
	Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km);
-
-	outbuf[0] = (block[0] >> 24) & 0xff;
-	outbuf[1] = (block[0] >> 16) & 0xff;
-	outbuf[2] = (block[0] >> 8) & 0xff;
-	outbuf[3] = block[0] & 0xff;
-	outbuf[4] = (block[1] >> 24) & 0xff;
-	outbuf[5] = (block[1] >> 16) & 0xff;
-	outbuf[6] = (block[1] >> 8) & 0xff;
-	outbuf[7] = block[1] & 0xff;
-	outbuf[8] = (block[2] >> 24) & 0xff;
-	outbuf[9] = (block[2] >> 16) & 0xff;
-	outbuf[10] = (block[2] >> 8) & 0xff;
-	outbuf[11] = block[2] & 0xff;
-	outbuf[12] = (block[3] >> 24) & 0xff;
-	outbuf[13] = (block[3] >> 16) & 0xff;
-	outbuf[14] = (block[3] >> 8) & 0xff;
-	outbuf[15] = block[3] & 0xff;
+
+	dst[0] = cpu_to_be32(block[0]);
+	dst[1] = cpu_to_be32(block[1]);
+	dst[2] = cpu_to_be32(block[2]);
+	dst[3] = cpu_to_be32(block[3]);
 }

 static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
	struct cast6_ctx * c = (struct cast6_ctx *)ctx;
+	const __be32 *src = (const __be32 *)inbuf;
+	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	u32 * Km;
	u8 * Kr;

-	block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
-	block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
-	block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11];
-	block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15];
+	block[0] = be32_to_cpu(src[0]);
+	block[1] = be32_to_cpu(src[1]);
+	block[2] = be32_to_cpu(src[2]);
+	block[3] = be32_to_cpu(src[3]);

	Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km);
	Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km);
···
	Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km);
	Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km);

-	outbuf[0] = (block[0] >> 24) & 0xff;
-	outbuf[1] = (block[0] >> 16) & 0xff;
-	outbuf[2] = (block[0] >> 8) & 0xff;
-	outbuf[3] = block[0] & 0xff;
-	outbuf[4] = (block[1] >> 24) & 0xff;
-	outbuf[5] = (block[1] >> 16) & 0xff;
-	outbuf[6] = (block[1] >> 8) & 0xff;
-	outbuf[7] = block[1] & 0xff;
-	outbuf[8] = (block[2] >> 24) & 0xff;
-	outbuf[9] = (block[2] >> 16) & 0xff;
-	outbuf[10] = (block[2] >> 8) & 0xff;
-	outbuf[11] = block[2] & 0xff;
-	outbuf[12] = (block[3] >> 24) & 0xff;
-	outbuf[13] = (block[3] >> 16) & 0xff;
-	outbuf[14] = (block[3] >> 8) & 0xff;
-	outbuf[15] = block[3] & 0xff;
+	dst[0] = cpu_to_be32(block[0]);
+	dst[1] = cpu_to_be32(block[1]);
+	dst[2] = cpu_to_be32(block[2]);
+	dst[3] = cpu_to_be32(block[3]);
 }

 static struct crypto_alg alg = {
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast6_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(alg.cra_list),
	.cra_u = {
+3 -2
crypto/cipher.c
···
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);
+	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);

-	u8 stack[src == dst ? bsize : 0];
-	u8 *buf = stack;
+	u8 stack[src == dst ? bsize + alignmask : 0];
+	u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
	u8 **dst_p = src == dst ? &buf : &dst;

	void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
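The in-place path here borrows a stack buffer, and that buffer must now honour the cipher's alignment contract, so it is over-allocated by alignmask bytes and the working pointer rounded up. The idiom in isolation (a sketch assuming kernel types; ALIGN() is the kernel's round-up-to-multiple macro, shown here with an equivalent definition):

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	static void aligned_stack_buf_demo(unsigned int bsize, unsigned long mask)
	{
		u8 stack[bsize + mask];	/* worst case: maximally misaligned */
		u8 *buf = (u8 *)ALIGN_UP((unsigned long)stack, mask + 1);

		/* buf..buf+bsize lies inside stack[] and is (mask+1)-aligned */
	}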
+1
crypto/crc32c.c
···
 #include <linux/string.h>
 #include <linux/crypto.h>
 #include <linux/crc32c.h>
+#include <linux/types.h>
 #include <asm/byteorder.h>

 #define CHKSUM_BLOCK_SIZE 32
+3
crypto/des.c
···
 *
 */

+#include <asm/byteorder.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #define DES_KEY_SIZE		8
 #define DES_EXPKEY_WORDS	32
···
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct des_ctx),
	.cra_module = THIS_MODULE,
+	.cra_alignmask = 3,
	.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
	.cra_u = { .cipher = {
		.cia_min_keysize = DES_KEY_SIZE,
+6
crypto/internal.h
···
 * Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
···
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
+#include <linux/list.h>
 #include <linux/kernel.h>
+#include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <asm/kmap_types.h>
+
+extern struct list_head crypto_alg_list;
+extern struct rw_semaphore crypto_alg_sem;

 extern enum km_type crypto_km_types[];
+10 -36
crypto/khazad.c
···
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <asm/byteorder.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #define KHAZAD_KEY_SIZE		16
 #define KHAZAD_BLOCK_SIZE	8
···
 static int khazad_setkey(void *ctx_arg, const u8 *in_key,
			  unsigned int key_len, u32 *flags)
 {
-
	struct khazad_ctx *ctx = ctx_arg;
+	const __be64 *key = (const __be64 *)in_key;
	int r;
	const u64 *S = T7;
	u64 K2, K1;
···
		return -EINVAL;
	}

-	K2 = ((u64)in_key[ 0] << 56) ^
-	     ((u64)in_key[ 1] << 48) ^
-	     ((u64)in_key[ 2] << 40) ^
-	     ((u64)in_key[ 3] << 32) ^
-	     ((u64)in_key[ 4] << 24) ^
-	     ((u64)in_key[ 5] << 16) ^
-	     ((u64)in_key[ 6] <<  8) ^
-	     ((u64)in_key[ 7]      );
-	K1 = ((u64)in_key[ 8] << 56) ^
-	     ((u64)in_key[ 9] << 48) ^
-	     ((u64)in_key[10] << 40) ^
-	     ((u64)in_key[11] << 32) ^
-	     ((u64)in_key[12] << 24) ^
-	     ((u64)in_key[13] << 16) ^
-	     ((u64)in_key[14] <<  8) ^
-	     ((u64)in_key[15]      );
+	K2 = be64_to_cpu(key[0]);
+	K1 = be64_to_cpu(key[1]);

	/* setup the encrypt key */
	for (r = 0; r <= KHAZAD_ROUNDS; r++) {
···
 static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
		u8 *ciphertext, const u8 *plaintext)
 {
-
+	const __be64 *src = (const __be64 *)plaintext;
+	__be64 *dst = (__be64 *)ciphertext;
	int r;
	u64 state;

-	state = ((u64)plaintext[0] << 56) ^
-		((u64)plaintext[1] << 48) ^
-		((u64)plaintext[2] << 40) ^
-		((u64)plaintext[3] << 32) ^
-		((u64)plaintext[4] << 24) ^
-		((u64)plaintext[5] << 16) ^
-		((u64)plaintext[6] <<  8) ^
-		((u64)plaintext[7]      ) ^
-		roundKey[0];
+	state = be64_to_cpu(*src) ^ roundKey[0];

	for (r = 1; r < KHAZAD_ROUNDS; r++) {
		state = T0[(int)(state >> 56)       ] ^
···
		(T7[(int)(state      ) & 0xff] & 0x00000000000000ffULL) ^
		roundKey[KHAZAD_ROUNDS];

-	ciphertext[0] = (u8)(state >> 56);
-	ciphertext[1] = (u8)(state >> 48);
-	ciphertext[2] = (u8)(state >> 40);
-	ciphertext[3] = (u8)(state >> 32);
-	ciphertext[4] = (u8)(state >> 24);
-	ciphertext[5] = (u8)(state >> 16);
-	ciphertext[6] = (u8)(state >>  8);
-	ciphertext[7] = (u8)(state      );
-
+	*dst = cpu_to_be64(state);
 }

 static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = KHAZAD_BLOCK_SIZE,
	.cra_ctxsize = sizeof (struct khazad_ctx),
+	.cra_alignmask = 7,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(khazad_alg.cra_list),
	.cra_u = { .cipher = {
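Khazad is the 64-bit-word case of the same conversion: key and block are read through __be64 pointers, so the advertised alignment rises accordingly, .cra_alignmask = 7 instead of the 3 used by the 32-bit ciphers. The entire sixteen-line shift chain collapses to (sketch, assuming kernel context):

	static u64 load_be64(const u8 *p)	/* p must be 8-byte aligned */
	{
		return be64_to_cpu(*(const __be64 *)p);
	}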
+1
crypto/md4.c
···
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/types.h>
 #include <asm/byteorder.h>

 #define MD4_DIGEST_SIZE 16
+1
crypto/md5.c
···
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/crypto.h>
+#include <linux/types.h>
 #include <asm/byteorder.h>

 #define MD5_DIGEST_SIZE 16
+17 -23
crypto/michael_mic.c
···
 * published by the Free Software Foundation.
 */

+#include <asm/byteorder.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/crypto.h>
+#include <linux/types.h>


 struct michael_mic_ctx {
···
 } while (0)


-static inline u32 get_le32(const u8 *p)
-{
-	return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
-}
-
-
-static inline void put_le32(u8 *p, u32 v)
-{
-	p[0] = v;
-	p[1] = v >> 8;
-	p[2] = v >> 16;
-	p[3] = v >> 24;
-}
-
-
 static void michael_init(void *ctx)
 {
	struct michael_mic_ctx *mctx = ctx;
···
 static void michael_update(void *ctx, const u8 *data, unsigned int len)
 {
	struct michael_mic_ctx *mctx = ctx;
+	const __le32 *src;

	if (mctx->pending_len) {
		int flen = 4 - mctx->pending_len;
···
		if (mctx->pending_len < 4)
			return;

-		mctx->l ^= get_le32(mctx->pending);
+		src = (const __le32 *)mctx->pending;
+		mctx->l ^= le32_to_cpup(src);
		michael_block(mctx->l, mctx->r);
		mctx->pending_len = 0;
	}

+	src = (const __le32 *)data;
+
	while (len >= 4) {
-		mctx->l ^= get_le32(data);
+		mctx->l ^= le32_to_cpup(src++);
		michael_block(mctx->l, mctx->r);
-		data += 4;
		len -= 4;
	}

	if (len > 0) {
		mctx->pending_len = len;
-		memcpy(mctx->pending, data, len);
+		memcpy(mctx->pending, src, len);
	}
 }
···
 {
	struct michael_mic_ctx *mctx = ctx;
	u8 *data = mctx->pending;
+	__le32 *dst = (__le32 *)out;

	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (mctx->pending_len) {
···
	/* l ^= 0; */
	michael_block(mctx->l, mctx->r);

-	put_le32(out, mctx->l);
-	put_le32(out + 4, mctx->r);
+	dst[0] = cpu_to_le32(mctx->l);
+	dst[1] = cpu_to_le32(mctx->r);
 }

···
		      u32 *flags)
 {
	struct michael_mic_ctx *mctx = ctx;
+	const __le32 *data = (const __le32 *)key;
+
	if (keylen != 8) {
		if (flags)
			*flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
-	mctx->l = get_le32(key);
-	mctx->r = get_le32(key + 4);
+
+	mctx->l = le32_to_cpu(data[0]);
+	mctx->r = le32_to_cpu(data[1]);
	return 0;
 }
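One detail worth noting in michael_update() above: after the word loop, src has already been advanced past every consumed word by the le32_to_cpup(src++) post-increment, so the trailing memcpy() into the pending buffer correctly starts at the first unconsumed byte and the old separate data += 4 bookkeeping becomes unnecessary.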
+3 -3
crypto/proc.c
···
 * Procfs information.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
···
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include "internal.h"
-
-extern struct list_head crypto_alg_list;
-extern struct rw_semaphore crypto_alg_sem;

 static void *c_start(struct seq_file *m, loff_t *pos)
 {
···
	struct crypto_alg *alg = (struct crypto_alg *)p;

	seq_printf(m, "name         : %s\n", alg->cra_name);
+	seq_printf(m, "driver       : %s\n", alg->cra_driver_name);
	seq_printf(m, "module       : %s\n", module_name(alg->cra_module));
+	seq_printf(m, "priority     : %d\n", alg->cra_priority);

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_CIPHER:
+2
crypto/serpent.c
···
 #include <linux/errno.h>
 #include <asm/byteorder.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 /* Key is padded to the maximum of 256 bits before round key generation.
 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = SERPENT_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct serpent_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
	.cra_u = { .cipher = {
+32 -34
crypto/sha1.c
···
 #include <linux/mm.h>
 #include <linux/crypto.h>
 #include <linux/cryptohash.h>
+#include <linux/types.h>
 #include <asm/scatterlist.h>
 #include <asm/byteorder.h>
···
 static void sha1_update(void *ctx, const u8 *data, unsigned int len)
 {
	struct sha1_ctx *sctx = ctx;
-	unsigned int i, j;
-	u32 temp[SHA_WORKSPACE_WORDS];
+	unsigned int partial, done;
+	const u8 *src;

-	j = (sctx->count >> 3) & 0x3f;
-	sctx->count += len << 3;
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;

-	if ((j + len) > 63) {
-		memcpy(&sctx->buffer[j], data, (i = 64-j));
-		sha_transform(sctx->state, sctx->buffer, temp);
-		for ( ; i + 63 < len; i += 64) {
-			sha_transform(sctx->state, &data[i], temp);
+	if ((partial + len) > 63) {
+		u32 temp[SHA_WORKSPACE_WORDS];
+
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buffer + partial, data, done + 64);
+			src = sctx->buffer;
		}
-		j = 0;
+
+		do {
+			sha_transform(sctx->state, src, temp);
+			done += 64;
+			src = data + done;
+		} while (done + 63 < len);
+
+		memset(temp, 0, sizeof(temp));
+		partial = 0;
	}
-	else i = 0;
-	memset(temp, 0, sizeof(temp));
-	memcpy(&sctx->buffer[j], &data[i], len - i);
+	memcpy(sctx->buffer + partial, src, len - done);
 }

···
 static void sha1_final(void* ctx, u8 *out)
 {
	struct sha1_ctx *sctx = ctx;
-	u32 i, j, index, padlen;
-	u64 t;
-	u8 bits[8] = { 0, };
+	__be32 *dst = (__be32 *)out;
+	u32 i, index, padlen;
+	__be64 bits;
	static const u8 padding[64] = { 0x80, };

-	t = sctx->count;
-	bits[7] = 0xff & t; t>>=8;
-	bits[6] = 0xff & t; t>>=8;
-	bits[5] = 0xff & t; t>>=8;
-	bits[4] = 0xff & t; t>>=8;
-	bits[3] = 0xff & t; t>>=8;
-	bits[2] = 0xff & t; t>>=8;
-	bits[1] = 0xff & t; t>>=8;
-	bits[0] = 0xff & t;
+	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 */
-	index = (sctx->count >> 3) & 0x3f;
+	index = sctx->count & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
	sha1_update(sctx, padding, padlen);

	/* Append length */
-	sha1_update(sctx, bits, sizeof bits);
+	sha1_update(sctx, (const u8 *)&bits, sizeof(bits));

	/* Store state in digest */
-	for (i = j = 0; i < 5; i++, j += 4) {
-		u32 t2 = sctx->state[i];
-		out[j+3] = t2 & 0xff; t2>>=8;
-		out[j+2] = t2 & 0xff; t2>>=8;
-		out[j+1] = t2 & 0xff; t2>>=8;
-		out[j  ] = t2 & 0xff;
-	}
+	for (i = 0; i < 5; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof *sctx);
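Two things changed in sha1_update() beyond the byte-swapping cleanup. First, sctx->count now counts bytes rather than bits; the << 3 moved into sha1_final(), where cpu_to_be64(sctx->count << 3) builds the 64-bit length trailer in one step. Second, the buffering uses a small offset trick: when partial bytes are already staged, done starts at -partial (wrapping in unsigned arithmetic), so done + 64 is exactly the number of input bytes needed to complete the staged block, and the running done += 64 inside the loop leaves done = 64 - partial after the first transform, i.e. the offset of the next unread input byte. For example, with partial = 20 the code copies done + 64 = 44 bytes to fill the buffer, transforms it, and resumes reading input at offset 44. The workspace temp is also scoped inside the branch and wiped only when it was actually used.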
+10 -21
crypto/sha256.c
···
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/crypto.h>
+#include <linux/types.h>
 #include <asm/scatterlist.h>
 #include <asm/byteorder.h>
···
 static void sha256_final(void* ctx, u8 *out)
 {
	struct sha256_ctx *sctx = ctx;
-	u8 bits[8];
-	unsigned int index, pad_len, t;
-	int i, j;
+	__be32 *dst = (__be32 *)out;
+	__be32 bits[2];
+	unsigned int index, pad_len;
+	int i;
	static const u8 padding[64] = { 0x80, };

	/* Save number of bits */
-	t = sctx->count[0];
-	bits[7] = t; t >>= 8;
-	bits[6] = t; t >>= 8;
-	bits[5] = t; t >>= 8;
-	bits[4] = t;
-	t = sctx->count[1];
-	bits[3] = t; t >>= 8;
-	bits[2] = t; t >>= 8;
-	bits[1] = t; t >>= 8;
-	bits[0] = t;
+	bits[1] = cpu_to_be32(sctx->count[0]);
+	bits[0] = cpu_to_be32(sctx->count[1]);

	/* Pad out to 56 mod 64. */
	index = (sctx->count[0] >> 3) & 0x3f;
···
	sha256_update(sctx, padding, pad_len);

	/* Append length (before padding) */
-	sha256_update(sctx, bits, 8);
+	sha256_update(sctx, (const u8 *)bits, sizeof(bits));

	/* Store state in digest */
-	for (i = j = 0; i < 8; i++, j += 4) {
-		t = sctx->state[i];
-		out[j+3] = t; t >>= 8;
-		out[j+2] = t; t >>= 8;
-		out[j+1] = t; t >>= 8;
-		out[j  ] = t;
-	}
+	for (i = 0; i < 8; i++)
+		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Zeroize sensitive information. */
	memset(sctx, 0, sizeof(*sctx));
+12 -42
crypto/sha512.c
···
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #include <asm/scatterlist.h>
 #include <asm/byteorder.h>
···
 sha512_final(void *ctx, u8 *hash)
 {
	struct sha512_ctx *sctx = ctx;
-
	static u8 padding[128] = { 0x80, };
-
-	u32 t;
-	u64 t2;
-	u8 bits[128];
+	__be64 *dst = (__be64 *)hash;
+	__be32 bits[4];
	unsigned int index, pad_len;
-	int i, j;
-
-	index = pad_len = t = i = j = 0;
-	t2 = 0;
+	int i;

	/* Save number of bits */
-	t = sctx->count[0];
-	bits[15] = t; t>>=8;
-	bits[14] = t; t>>=8;
-	bits[13] = t; t>>=8;
-	bits[12] = t;
-	t = sctx->count[1];
-	bits[11] = t; t>>=8;
-	bits[10] = t; t>>=8;
-	bits[9 ] = t; t>>=8;
-	bits[8 ] = t;
-	t = sctx->count[2];
-	bits[7 ] = t; t>>=8;
-	bits[6 ] = t; t>>=8;
-	bits[5 ] = t; t>>=8;
-	bits[4 ] = t;
-	t = sctx->count[3];
-	bits[3 ] = t; t>>=8;
-	bits[2 ] = t; t>>=8;
-	bits[1 ] = t; t>>=8;
-	bits[0 ] = t;
+	bits[3] = cpu_to_be32(sctx->count[0]);
+	bits[2] = cpu_to_be32(sctx->count[1]);
+	bits[1] = cpu_to_be32(sctx->count[2]);
+	bits[0] = cpu_to_be32(sctx->count[3]);

	/* Pad out to 112 mod 128. */
	index = (sctx->count[0] >> 3) & 0x7f;
···
	sha512_update(sctx, padding, pad_len);

	/* Append length (before padding) */
-	sha512_update(sctx, bits, 16);
+	sha512_update(sctx, (const u8 *)bits, sizeof(bits));

	/* Store state in digest */
-	for (i = j = 0; i < 8; i++, j += 8) {
-		t2 = sctx->state[i];
-		hash[j+7] = (char)t2 & 0xff; t2>>=8;
-		hash[j+6] = (char)t2 & 0xff; t2>>=8;
-		hash[j+5] = (char)t2 & 0xff; t2>>=8;
-		hash[j+4] = (char)t2 & 0xff; t2>>=8;
-		hash[j+3] = (char)t2 & 0xff; t2>>=8;
-		hash[j+2] = (char)t2 & 0xff; t2>>=8;
-		hash[j+1] = (char)t2 & 0xff; t2>>=8;
-		hash[j ] = (char)t2 & 0xff;
-	}
-
+	for (i = 0; i < 8; i++)
+		dst[i] = cpu_to_be64(sctx->state[i]);
+
	/* Zeroize sensitive information. */
	memset(sctx, 0, sizeof(struct sha512_ctx));
 }
+51 -47
crypto/tea.c
···
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <asm/byteorder.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #define TEA_KEY_SIZE		16
 #define TEA_BLOCK_SIZE		8
···
 #define XTEA_BLOCK_SIZE		8
 #define XTEA_ROUNDS		32
 #define XTEA_DELTA		0x9e3779b9
-
-#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
-#define u32_out(to, from) (*(__le32 *)(to) = cpu_to_le32(from))

 struct tea_ctx {
	u32 KEY[4];
···
 static int tea_setkey(void *ctx_arg, const u8 *in_key,
		       unsigned int key_len, u32 *flags)
 {
-
	struct tea_ctx *ctx = ctx_arg;
+	const __le32 *key = (const __le32 *)in_key;

	if (key_len != 16)
	{
···
		return -EINVAL;
	}

-	ctx->KEY[0] = u32_in (in_key);
-	ctx->KEY[1] = u32_in (in_key + 4);
-	ctx->KEY[2] = u32_in (in_key + 8);
-	ctx->KEY[3] = u32_in (in_key + 12);
+	ctx->KEY[0] = le32_to_cpu(key[0]);
+	ctx->KEY[1] = le32_to_cpu(key[1]);
+	ctx->KEY[2] = le32_to_cpu(key[2]);
+	ctx->KEY[3] = le32_to_cpu(key[3]);

	return 0;

···
	u32 k0, k1, k2, k3;

	struct tea_ctx *ctx = ctx_arg;
+	const __le32 *in = (const __le32 *)src;
+	__le32 *out = (__le32 *)dst;

-	y = u32_in (src);
-	z = u32_in (src + 4);
+	y = le32_to_cpu(in[0]);
+	z = le32_to_cpu(in[1]);

	k0 = ctx->KEY[0];
	k1 = ctx->KEY[1];
···
		z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
	}

-	u32_out (dst, y);
-	u32_out (dst + 4, z);
+	out[0] = cpu_to_le32(y);
+	out[1] = cpu_to_le32(z);
 }

 static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
 {
	u32 y, z, n, sum;
	u32 k0, k1, k2, k3;
-
	struct tea_ctx *ctx = ctx_arg;
+	const __le32 *in = (const __le32 *)src;
+	__le32 *out = (__le32 *)dst;

-	y = u32_in (src);
-	z = u32_in (src + 4);
+	y = le32_to_cpu(in[0]);
+	z = le32_to_cpu(in[1]);

	k0 = ctx->KEY[0];
	k1 = ctx->KEY[1];
···
		sum -= TEA_DELTA;
	}

-	u32_out (dst, y);
-	u32_out (dst + 4, z);
-
+	out[0] = cpu_to_le32(y);
+	out[1] = cpu_to_le32(z);
 }

 static int xtea_setkey(void *ctx_arg, const u8 *in_key,
		        unsigned int key_len, u32 *flags)
 {
-
	struct xtea_ctx *ctx = ctx_arg;
+	const __le32 *key = (const __le32 *)in_key;

	if (key_len != 16)
	{
···
		return -EINVAL;
	}

-	ctx->KEY[0] = u32_in (in_key);
-	ctx->KEY[1] = u32_in (in_key + 4);
-	ctx->KEY[2] = u32_in (in_key + 8);
-	ctx->KEY[3] = u32_in (in_key + 12);
+	ctx->KEY[0] = le32_to_cpu(key[0]);
+	ctx->KEY[1] = le32_to_cpu(key[1]);
+	ctx->KEY[2] = le32_to_cpu(key[2]);
+	ctx->KEY[3] = le32_to_cpu(key[3]);

	return 0;

 }

 static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
 {
-
	u32 y, z, sum = 0;
	u32 limit = XTEA_DELTA * XTEA_ROUNDS;

	struct xtea_ctx *ctx = ctx_arg;
+	const __le32 *in = (const __le32 *)src;
+	__le32 *out = (__le32 *)dst;

-	y = u32_in (src);
-	z = u32_in (src + 4);
+	y = le32_to_cpu(in[0]);
+	z = le32_to_cpu(in[1]);

	while (sum != limit) {
		y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
···
		z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
	}

-	u32_out (dst, y);
-	u32_out (dst + 4, z);
-
+	out[0] = cpu_to_le32(y);
+	out[1] = cpu_to_le32(z);
 }

 static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
 {
-
	u32 y, z, sum;
	struct tea_ctx *ctx = ctx_arg;
+	const __le32 *in = (const __le32 *)src;
+	__le32 *out = (__le32 *)dst;

-	y = u32_in (src);
-	z = u32_in (src + 4);
+	y = le32_to_cpu(in[0]);
+	z = le32_to_cpu(in[1]);

	sum = XTEA_DELTA * XTEA_ROUNDS;
···
		y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
	}

-	u32_out (dst, y);
-	u32_out (dst + 4, z);
-
+	out[0] = cpu_to_le32(y);
+	out[1] = cpu_to_le32(z);
 }


 static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
 {
-
	u32 y, z, sum = 0;
	u32 limit = XTEA_DELTA * XTEA_ROUNDS;

	struct xtea_ctx *ctx = ctx_arg;
+	const __le32 *in = (const __le32 *)src;
+	__le32 *out = (__le32 *)dst;

-	y = u32_in (src);
-	z = u32_in (src + 4);
+	y = le32_to_cpu(in[0]);
+	z = le32_to_cpu(in[1]);

	while (sum != limit) {
		y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
···
		z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
	}

-	u32_out (dst, y);
-	u32_out (dst + 4, z);
-
+	out[0] = cpu_to_le32(y);
+	out[1] = cpu_to_le32(z);
 }

 static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
 {
-
	u32 y, z, sum;
	struct tea_ctx *ctx = ctx_arg;
+	const __le32 *in = (const __le32 *)src;
+	__le32 *out = (__le32 *)dst;

-	y = u32_in (src);
-	z = u32_in (src + 4);
+	y = le32_to_cpu(in[0]);
+	z = le32_to_cpu(in[1]);

	sum = XTEA_DELTA * XTEA_ROUNDS;
···
		y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
	}

-	u32_out (dst, y);
-	u32_out (dst + 4, z);
-
+	out[0] = cpu_to_le32(y);
+	out[1] = cpu_to_le32(z);
 }

 static struct crypto_alg tea_alg = {
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = TEA_BLOCK_SIZE,
	.cra_ctxsize = sizeof (struct tea_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(tea_alg.cra_list),
	.cra_u = { .cipher = {
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = XTEA_BLOCK_SIZE,
	.cra_ctxsize = sizeof (struct xtea_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
	.cra_u = { .cipher = {
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = XTEA_BLOCK_SIZE,
	.cra_ctxsize = sizeof (struct xtea_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
	.cra_u = { .cipher = {
+16 -46
crypto/tgr192.c
···
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <asm/byteorder.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #define TGR192_DIGEST_SIZE 24
 #define TGR160_DIGEST_SIZE 20
···
	u64 a, b, c, aa, bb, cc;
	u64 x[8];
	int i;
-	const u8 *ptr = data;
+	const __le64 *ptr = (const __le64 *)data;

-	for (i = 0; i < 8; i++, ptr += 8) {
-		x[i] = (((u64)ptr[7]        ) << 56) ^
-		       (((u64)ptr[6] & 0xffL) << 48) ^
-		       (((u64)ptr[5] & 0xffL) << 40) ^
-		       (((u64)ptr[4] & 0xffL) << 32) ^
-		       (((u64)ptr[3] & 0xffL) << 24) ^
-		       (((u64)ptr[2] & 0xffL) << 16) ^
-		       (((u64)ptr[1] & 0xffL) <<  8) ^
-		       (((u64)ptr[0] & 0xffL)      );
-	}
+	for (i = 0; i < 8; i++)
+		x[i] = le64_to_cpu(ptr[i]);

	/* save */
	a = aa = tctx->a;
···
 static void tgr192_final(void *ctx, u8 * out)
 {
	struct tgr192_ctx *tctx = ctx;
+	__be64 *dst = (__be64 *)out;
+	__be64 *be64p;
+	__le32 *le32p;
	u32 t, msb, lsb;
-	u8 *p;
-	int i, j;

	tgr192_update(tctx, NULL, 0); /* flush */ ;
···
		memset(tctx->hash, 0, 56);    /* fill next block with zeroes */
	}
	/* append the 64 bit count */
-	tctx->hash[56] = lsb;
-	tctx->hash[57] = lsb >> 8;
-	tctx->hash[58] = lsb >> 16;
-	tctx->hash[59] = lsb >> 24;
-	tctx->hash[60] = msb;
-	tctx->hash[61] = msb >> 8;
-	tctx->hash[62] = msb >> 16;
-	tctx->hash[63] = msb >> 24;
+	le32p = (__le32 *)&tctx->hash[56];
+	le32p[0] = cpu_to_le32(lsb);
+	le32p[1] = cpu_to_le32(msb);
+
	tgr192_transform(tctx, tctx->hash);

-	p = tctx->hash;
-	*p++ = tctx->a >> 56; *p++ = tctx->a >> 48; *p++ = tctx->a >> 40;
-	*p++ = tctx->a >> 32; *p++ = tctx->a >> 24; *p++ = tctx->a >> 16;
-	*p++ = tctx->a >>  8; *p++ = tctx->a;
-	*p++ = tctx->b >> 56; *p++ = tctx->b >> 48; *p++ = tctx->b >> 40;
-	*p++ = tctx->b >> 32; *p++ = tctx->b >> 24; *p++ = tctx->b >> 16;
-	*p++ = tctx->b >>  8; *p++ = tctx->b;
-	*p++ = tctx->c >> 56; *p++ = tctx->c >> 48; *p++ = tctx->c >> 40;
-	*p++ = tctx->c >> 32; *p++ = tctx->c >> 24; *p++ = tctx->c >> 16;
-	*p++ = tctx->c >>  8; *p++ = tctx->c;
-
-	/* unpack the hash */
-	j = 7;
-	for (i = 0; i < 8; i++) {
-		out[j--] = (tctx->a >> 8 * i) & 0xff;
-	}
-	j = 15;
-	for (i = 0; i < 8; i++) {
-		out[j--] = (tctx->b >> 8 * i) & 0xff;
-	}
-	j = 23;
-	for (i = 0; i < 8; i++) {
-		out[j--] = (tctx->c >> 8 * i) & 0xff;
-	}
+	be64p = (__be64 *)tctx->hash;
+	dst[0] = be64p[0] = cpu_to_be64(tctx->a);
+	dst[1] = be64p[1] = cpu_to_be64(tctx->b);
+	dst[2] = be64p[2] = cpu_to_be64(tctx->c);
 }

 static void tgr160_final(void *ctx, u8 * out)
+9 -4
crypto/twofish.c
···
 * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
 * Third Edition.
 */
+
+#include <asm/byteorder.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
···
 * whitening subkey number m. */

 #define INPACK(n, x, m) \
-   x = in[4 * (n)] ^ (in[4 * (n) + 1] << 8) \
-     ^ (in[4 * (n) + 2] << 16) ^ (in[4 * (n) + 3] << 24) ^ ctx->w[m]
+   x = le32_to_cpu(src[n]) ^ ctx->w[m]

 #define OUTUNPACK(n, x, m) \
    x ^= ctx->w[m]; \
-   out[4 * (n)] = x; out[4 * (n) + 1] = x >> 8; \
-   out[4 * (n) + 2] = x >> 16; out[4 * (n) + 3] = x >> 24
+   dst[n] = cpu_to_le32(x)

 #define TF_MIN_KEY_SIZE 16
 #define TF_MAX_KEY_SIZE 32
···
 static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
 {
	struct twofish_ctx *ctx = cx;
+	const __le32 *src = (const __le32 *)in;
+	__le32 *dst = (__le32 *)out;

	/* The four 32-bit chunks of the text. */
	u32 a, b, c, d;
···
 static void twofish_decrypt(void *cx, u8 *out, const u8 *in)
 {
	struct twofish_ctx *ctx = cx;
+	const __le32 *src = (const __le32 *)in;
+	__le32 *dst = (__le32 *)out;

	/* The four 32-bit chunks of the text. */
	u32 a, b, c, d;
···
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = TF_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct twofish_ctx),
+	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(alg.cra_list),
	.cra_u = { .cipher = {
+8 -24
crypto/wp512.c
···
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <asm/byteorder.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
+#include <linux/types.h>

 #define WP512_DIGEST_SIZE 64
 #define WP384_DIGEST_SIZE 48
···
	u64 block[8];    /* mu(buffer) */
	u64 state[8];    /* the cipher state */
	u64 L[8];
-	u8 *buffer = wctx->buffer;
+	const __be64 *buffer = (const __be64 *)wctx->buffer;

-	for (i = 0; i < 8; i++, buffer += 8) {
-		block[i] =
-			(((u64)buffer[0]        ) << 56) ^
-			(((u64)buffer[1] & 0xffL) << 48) ^
-			(((u64)buffer[2] & 0xffL) << 40) ^
-			(((u64)buffer[3] & 0xffL) << 32) ^
-			(((u64)buffer[4] & 0xffL) << 24) ^
-			(((u64)buffer[5] & 0xffL) << 16) ^
-			(((u64)buffer[6] & 0xffL) <<  8) ^
-			(((u64)buffer[7] & 0xffL)      );
-	}
+	for (i = 0; i < 8; i++)
+		block[i] = be64_to_cpu(buffer[i]);

	state[0] = block[0] ^ (K[0] = wctx->hash[0]);
	state[1] = block[1] ^ (K[1] = wctx->hash[1]);
···
	u8 *bitLength = wctx->bitLength;
	int bufferBits = wctx->bufferBits;
	int bufferPos = wctx->bufferPos;
-	u8 *digest = out;
+	__be64 *digest = (__be64 *)out;

	buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
	bufferPos++;
···
	memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
	       bitLength, WP512_LENGTHBYTES);
	wp512_process_buffer(wctx);
-	for (i = 0; i < WP512_DIGEST_SIZE/8; i++) {
-		digest[0] = (u8)(wctx->hash[i] >> 56);
-		digest[1] = (u8)(wctx->hash[i] >> 48);
-		digest[2] = (u8)(wctx->hash[i] >> 40);
-		digest[3] = (u8)(wctx->hash[i] >> 32);
-		digest[4] = (u8)(wctx->hash[i] >> 24);
-		digest[5] = (u8)(wctx->hash[i] >> 16);
-		digest[6] = (u8)(wctx->hash[i] >>  8);
-		digest[7] = (u8)(wctx->hash[i]      );
-		digest += 8;
-	}
+	for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
+		digest[i] = cpu_to_be64(wctx->hash[i]);
	wctx->bufferBits   = bufferBits;
	wctx->bufferPos    = bufferPos;
 }
+13 -13
drivers/crypto/padlock-aes.c
··· 99 99 return x >> (n << 3); 100 100 } 101 101 102 - #define uint32_t_in(x) le32_to_cpu(*(const uint32_t *)(x)) 103 - #define uint32_t_out(to, from) (*(uint32_t *)(to) = cpu_to_le32(from)) 104 - 105 102 #define E_KEY ctx->E 106 103 #define D_KEY ctx->D 107 104 ··· 291 294 aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) 292 295 { 293 296 struct aes_ctx *ctx = aes_ctx(ctx_arg); 297 + const __le32 *key = (const __le32 *)in_key; 294 298 uint32_t i, t, u, v, w; 295 299 uint32_t P[AES_EXTENDED_KEY_SIZE]; 296 300 uint32_t rounds; ··· 311 313 ctx->E = ctx->e_data; 312 314 ctx->D = ctx->e_data; 313 315 314 - E_KEY[0] = uint32_t_in (in_key); 315 - E_KEY[1] = uint32_t_in (in_key + 4); 316 - E_KEY[2] = uint32_t_in (in_key + 8); 317 - E_KEY[3] = uint32_t_in (in_key + 12); 316 + E_KEY[0] = le32_to_cpu(key[0]); 317 + E_KEY[1] = le32_to_cpu(key[1]); 318 + E_KEY[2] = le32_to_cpu(key[2]); 319 + E_KEY[3] = le32_to_cpu(key[3]); 318 320 319 321 /* Prepare control words. */ 320 322 memset(&ctx->cword, 0, sizeof(ctx->cword)); ··· 341 343 break; 342 344 343 345 case 24: 344 - E_KEY[4] = uint32_t_in (in_key + 16); 345 - t = E_KEY[5] = uint32_t_in (in_key + 20); 346 + E_KEY[4] = le32_to_cpu(key[4]); 347 + t = E_KEY[5] = le32_to_cpu(key[5]); 346 348 for (i = 0; i < 8; ++i) 347 349 loop6 (i); 348 350 break; 349 351 350 352 case 32: 351 - E_KEY[4] = uint32_t_in (in_key + 16); 352 - E_KEY[5] = uint32_t_in (in_key + 20); 353 - E_KEY[6] = uint32_t_in (in_key + 24); 354 - t = E_KEY[7] = uint32_t_in (in_key + 28); 353 + E_KEY[4] = le32_to_cpu(key[4]); 354 + E_KEY[5] = le32_to_cpu(key[5]); 355 + E_KEY[6] = le32_to_cpu(key[6]); 356 + t = E_KEY[7] = le32_to_cpu(key[7]); 355 357 for (i = 0; i < 7; ++i) 356 358 loop8 (i); 357 359 break; ··· 466 468 467 469 static struct crypto_alg aes_alg = { 468 470 .cra_name = "aes", 471 + .cra_driver_name = "aes-padlock", 472 + .cra_priority = 300, 469 473 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 470 474 .cra_blocksize = AES_BLOCK_SIZE, 471 475 .cra_ctxsize = sizeof(struct aes_ctx),
+1 -1
drivers/crypto/padlock.h
··· 17 17 18 18 /* Control word. */ 19 19 struct cword { 20 - int __attribute__ ((__packed__)) 20 + unsigned int __attribute__ ((__packed__)) 21 21 rounds:4, 22 22 algo:3, 23 23 keygen:1,
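The signed-to-unsigned change matters because a signed 4-bit field can only represent -8..7, while AES puts 10, 12 or 14 into the rounds field; an unsigned 4-bit field holds 0..15. A standalone demonstration (struct names here are illustrative; out-of-range assignment to a signed bitfield is implementation-defined, which is the point):

#include <stdio.h>

struct cword_signed   { int          rounds:4; };
struct cword_unsigned { unsigned int rounds:4; };

int main(void)
{
        struct cword_signed   s = { 0 };
        struct cword_unsigned u = { 0 };

        s.rounds = 10;  /* out of range for a signed 4-bit field */
        u.rounds = 10;  /* fits: unsigned 4 bits hold 0..15 */

        /* On typical compilers s.rounds reads back as -6 (0b1010
         * sign-extended); the exact value is implementation-defined. */
        printf("signed: %d, unsigned: %d\n", s.rounds, u.rounds);
        return 0;
}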
+13
drivers/net/Kconfig
··· 27 27 # that for each of the symbols. 28 28 if NETDEVICES 29 29 30 + config IFB 31 + tristate "Intermediate Functional Block support" 32 + depends on NET_CLS_ACT 33 + ---help--- 34 + This is an intermediate driver that allows sharing of 35 + resources. 36 + To compile this driver as a module, choose M here: the module 37 + will be called ifb. If you want to use more than one ifb 38 + device at a time, you need to compile this driver as a module. 39 + Instead of 'ifb', the devices will then be called 'ifb0', 40 + 'ifb1' etc. 41 + Look at the iproute2 documentation directory for usage etc. 42 + 30 43 config DUMMY 31 44 tristate "Dummy net driver support" 32 45 ---help---
+1
drivers/net/Makefile
··· 125 125 endif 126 126 127 127 obj-$(CONFIG_DUMMY) += dummy.o 128 + obj-$(CONFIG_IFB) += ifb.o 128 129 obj-$(CONFIG_DE600) += de600.o 129 130 obj-$(CONFIG_DE620) += de620.o 130 131 obj-$(CONFIG_LANCE) += lance.o
+1 -1
drivers/net/hamradio/mkiss.c
··· 515 515 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 516 516 } 517 517 } 518 + spin_unlock_bh(&ax->buflock); 518 519 519 520 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 520 521 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); ··· 525 524 ax->dev->trans_start = jiffies; 526 525 ax->xleft = count - actual; 527 526 ax->xhead = ax->xbuff + actual; 528 - spin_unlock_bh(&ax->buflock); 529 527 } 530 528 531 529 /* Encapsulate an AX.25 packet and kick it into a TTY queue. */
+294
drivers/net/ifb.c
··· 1 + /* drivers/net/ifb.c: 2 + 3 + The purpose of this driver is to provide a device that allows 4 + for sharing of resources: 5 + 6 + 1) qdiscs/policies that are per device as opposed to system wide. 7 + ifb allows for a device which can be redirected to thus providing 8 + an impression of sharing. 9 + 10 + 2) Allows for queueing incoming traffic for shaping instead of 11 + dropping. 12 + 13 + The original concept is based on what is known as the IMQ 14 + driver initially written by Martin Devera, later rewritten 15 + by Patrick McHardy and then maintained by Andre Correa. 16 + 17 + You need the tc action mirror or redirect to feed this device 18 + packets. 19 + 20 + This program is free software; you can redistribute it and/or 21 + modify it under the terms of the GNU General Public License 22 + as published by the Free Software Foundation; either version 23 + 2 of the License, or (at your option) any later version. 24 + 25 + Authors: Jamal Hadi Salim (2005) 26 + 27 + */ 28 + 29 + 30 + #include <linux/config.h> 31 + #include <linux/module.h> 32 + #include <linux/kernel.h> 33 + #include <linux/netdevice.h> 34 + #include <linux/etherdevice.h> 35 + #include <linux/init.h> 36 + #include <linux/moduleparam.h> 37 + #include <net/pkt_sched.h> 38 + 39 + #define TX_TIMEOUT (2*HZ) 40 + 41 + #define TX_Q_LIMIT 32 42 + struct ifb_private { 43 + struct net_device_stats stats; 44 + struct tasklet_struct ifb_tasklet; 45 + int tasklet_pending; 46 + /* mostly debug stats, leave in for now */ 47 + unsigned long st_task_enter; /* tasklet entered */ 48 + unsigned long st_txq_refl_try; /* transmit queue refill attempt */ 49 + unsigned long st_rxq_enter; /* receive queue entered */ 50 + unsigned long st_rx2tx_tran; /* receive to transmit transfers */ 51 + unsigned long st_rxq_notenter; /* receive queue not entered, resched */ 52 + unsigned long st_rx_frm_egr; /* received from egress path */ 53 + unsigned long st_rx_frm_ing; /* received from ingress path */ 54 + unsigned long st_rxq_check; 55 + unsigned long st_rxq_rsch; 56 + struct sk_buff_head rq; 57 + struct sk_buff_head tq; 58 + }; 59 + 60 + static int numifbs = 1; 61 + 62 + static void ri_tasklet(unsigned long dev); 63 + static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); 64 + static struct net_device_stats *ifb_get_stats(struct net_device *dev); 65 + static int ifb_open(struct net_device *dev); 66 + static int ifb_close(struct net_device *dev); 67 + 68 + static void ri_tasklet(unsigned long dev) 69 + { 70 + 71 + struct net_device *_dev = (struct net_device *)dev; 72 + struct ifb_private *dp = netdev_priv(_dev); 73 + struct net_device_stats *stats = &dp->stats; 74 + struct sk_buff *skb; 75 + 76 + dp->st_task_enter++; 77 + if ((skb = skb_peek(&dp->tq)) == NULL) { 78 + dp->st_txq_refl_try++; 79 + if (spin_trylock(&_dev->xmit_lock)) { 80 + dp->st_rxq_enter++; 81 + while ((skb = skb_dequeue(&dp->rq)) != NULL) { 82 + skb_queue_tail(&dp->tq, skb); 83 + dp->st_rx2tx_tran++; 84 + } 85 + spin_unlock(&_dev->xmit_lock); 86 + } else { 87 + /* reschedule */ 88 + dp->st_rxq_notenter++; 89 + goto resched; 90 + } 91 + } 92 + 93 + while ((skb = skb_dequeue(&dp->tq)) != NULL) { 94 + u32 from = G_TC_FROM(skb->tc_verd); 95 + 96 + skb->tc_verd = 0; 97 + skb->tc_verd = SET_TC_NCLS(skb->tc_verd); 98 + stats->tx_packets++; 99 + stats->tx_bytes += skb->len; 100 + if (from & AT_EGRESS) { 101 + dp->st_rx_frm_egr++; 102 + dev_queue_xmit(skb); 103 + } else if (from & AT_INGRESS) { 104 + 105 + dp->st_rx_frm_ing++; 106 + netif_rx(skb); 107 + } else { 108 + dev_kfree_skb(skb);
109 + stats->tx_dropped++; 110 + } 111 + } 112 + 113 + if (spin_trylock(&_dev->xmit_lock)) { 114 + dp->st_rxq_check++; 115 + if ((skb = skb_peek(&dp->rq)) == NULL) { 116 + dp->tasklet_pending = 0; 117 + if (netif_queue_stopped(_dev)) 118 + netif_wake_queue(_dev); 119 + } else { 120 + dp->st_rxq_rsch++; 121 + spin_unlock(&_dev->xmit_lock); 122 + goto resched; 123 + } 124 + spin_unlock(&_dev->xmit_lock); 125 + } else { 126 + resched: 127 + dp->tasklet_pending = 1; 128 + tasklet_schedule(&dp->ifb_tasklet); 129 + } 130 + 131 + } 132 + 133 + static void __init ifb_setup(struct net_device *dev) 134 + { 135 + /* Initialize the device structure. */ 136 + dev->get_stats = ifb_get_stats; 137 + dev->hard_start_xmit = ifb_xmit; 138 + dev->open = &ifb_open; 139 + dev->stop = &ifb_close; 140 + 141 + /* Fill in device structure with ethernet-generic values. */ 142 + ether_setup(dev); 143 + dev->tx_queue_len = TX_Q_LIMIT; 144 + dev->change_mtu = NULL; 145 + dev->flags |= IFF_NOARP; 146 + dev->flags &= ~IFF_MULTICAST; 147 + SET_MODULE_OWNER(dev); 148 + random_ether_addr(dev->dev_addr); 149 + } 150 + 151 + static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) 152 + { 153 + struct ifb_private *dp = netdev_priv(dev); 154 + struct net_device_stats *stats = &dp->stats; 155 + int ret = 0; 156 + u32 from = G_TC_FROM(skb->tc_verd); 157 + 158 + stats->tx_packets++; 159 + stats->tx_bytes += skb->len; 160 + 161 + if (!from || !skb->input_dev) { 162 + dropped: 163 + dev_kfree_skb(skb); 164 + stats->rx_dropped++; 165 + return ret; 166 + } else { 167 + /* 168 + * note we could be going 169 + * ingress -> egress or 170 + * egress -> ingress 171 + */ 172 + skb->dev = skb->input_dev; 173 + skb->input_dev = dev; 174 + if (from & AT_INGRESS) { 175 + skb_pull(skb, skb->dev->hard_header_len); 176 + } else { 177 + if (!(from & AT_EGRESS)) { 178 + goto dropped; 179 + } 180 + } 181 + } 182 + 183 + if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { 184 + netif_stop_queue(dev); 185 + } 186 + 187 + dev->trans_start = jiffies; 188 + skb_queue_tail(&dp->rq, skb); 189 + if (!dp->tasklet_pending) { 190 + dp->tasklet_pending = 1; 191 + tasklet_schedule(&dp->ifb_tasklet); 192 + } 193 + 194 + return ret; 195 + } 196 + 197 + static struct net_device_stats *ifb_get_stats(struct net_device *dev) 198 + { 199 + struct ifb_private *dp = netdev_priv(dev); 200 + struct net_device_stats *stats = &dp->stats; 201 + 202 + pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld\n", 203 + dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter, 204 + dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr, 205 + dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch); 206 + 207 + return stats; 208 + } 209 + 210 + static struct net_device **ifbs; 211 + 212 + /* Number of ifb devices to be set up by this module.
*/ 213 + module_param(numifbs, int, 0); 214 + MODULE_PARM_DESC(numifbs, "Number of ifb devices"); 215 + 216 + static int ifb_close(struct net_device *dev) 217 + { 218 + struct ifb_private *dp = netdev_priv(dev); 219 + 220 + tasklet_kill(&dp->ifb_tasklet); 221 + netif_stop_queue(dev); 222 + skb_queue_purge(&dp->rq); 223 + skb_queue_purge(&dp->tq); 224 + return 0; 225 + } 226 + 227 + static int ifb_open(struct net_device *dev) 228 + { 229 + struct ifb_private *dp = netdev_priv(dev); 230 + 231 + tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); 232 + skb_queue_head_init(&dp->rq); 233 + skb_queue_head_init(&dp->tq); 234 + netif_start_queue(dev); 235 + 236 + return 0; 237 + } 238 + 239 + static int __init ifb_init_one(int index) 240 + { 241 + struct net_device *dev_ifb; 242 + int err; 243 + 244 + dev_ifb = alloc_netdev(sizeof(struct ifb_private), 245 + "ifb%d", ifb_setup); 246 + 247 + if (!dev_ifb) 248 + return -ENOMEM; 249 + 250 + if ((err = register_netdev(dev_ifb))) { 251 + free_netdev(dev_ifb); 252 + dev_ifb = NULL; 253 + } else { 254 + ifbs[index] = dev_ifb; 255 + } 256 + 257 + return err; 258 + } 259 + 260 + static void ifb_free_one(int index) 261 + { 262 + unregister_netdev(ifbs[index]); 263 + free_netdev(ifbs[index]); 264 + } 265 + 266 + static int __init ifb_init_module(void) 267 + { 268 + int i, err = 0; 269 + ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL); 270 + if (!ifbs) 271 + return -ENOMEM; 272 + for (i = 0; i < numifbs && !err; i++) 273 + err = ifb_init_one(i); 274 + if (err) { 275 + while (--i >= 0) 276 + ifb_free_one(i); 277 + } 278 + 279 + return err; 280 + } 281 + 282 + static void __exit ifb_cleanup_module(void) 283 + { 284 + int i; 285 + 286 + for (i = 0; i < numifbs; i++) 287 + ifb_free_one(i); 288 + kfree(ifbs); 289 + } 290 + 291 + module_init(ifb_init_module); 292 + module_exit(ifb_cleanup_module); 293 + MODULE_LICENSE("GPL"); 294 + MODULE_AUTHOR("Jamal Hadi Salim");
+5
include/linux/crypto.h
··· 3 3 * 4 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 5 * Copyright (c) 2002 David S. Miller (davem@redhat.com) 6 + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> 6 7 * 7 8 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> 8 9 * and Nettle, by Niels Möller. ··· 127 126 unsigned int cra_blocksize; 128 127 unsigned int cra_ctxsize; 129 128 unsigned int cra_alignmask; 129 + 130 + int cra_priority; 131 + 130 132 const char cra_name[CRYPTO_MAX_ALG_NAME]; 133 + const char cra_driver_name[CRYPTO_MAX_ALG_NAME]; 131 134 132 135 union { 133 136 struct cipher_alg cipher;
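The two new fields let several implementations of one algorithm register side by side: cra_name stays the generic name callers ask for, cra_driver_name identifies the specific implementation, and the core can prefer the registration with the higher cra_priority (the padlock diff above registers "aes" as "aes-padlock" at priority 300). A hedged, kernel-style fragment of what a hardware driver's registration might look like; the driver name and priority value below are illustrative, not from this patch, and fields not relevant here are omitted:

#include <linux/crypto.h>
#include <linux/module.h>

/* Illustrative only: a hardware AES implementation announcing itself
 * under the generic name "aes" with its own driver name, at a priority
 * intended to win the lookup over a plain C implementation. */
static struct crypto_alg example_aes_alg = {
        .cra_name        = "aes",          /* what users request */
        .cra_driver_name = "aes-example",  /* this implementation */
        .cra_priority    = 300,            /* higher is preferred */
        .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize   = 16,
        .cra_module      = THIS_MODULE,
};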
+1 -1
include/net/act_api.h
··· 63 63 __u32 type; /* TBD to match kind */ 64 64 __u32 capab; /* capabilities includes 4 bit version */ 65 65 struct module *owner; 66 - int (*act)(struct sk_buff **, struct tc_action *, struct tcf_result *); 66 + int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *); 67 67 int (*get_stats)(struct sk_buff *, struct tc_action *); 68 68 int (*dump)(struct sk_buff *, struct tc_action *,int , int); 69 69 int (*cleanup)(struct tc_action *, int bind);
+12 -11
include/net/pkt_sched.h
··· 1 1 #ifndef __NET_PKT_SCHED_H 2 2 #define __NET_PKT_SCHED_H 3 3 4 + #include <linux/jiffies.h> 4 5 #include <net/sch_generic.h> 5 6 6 7 struct qdisc_walker ··· 60 59 typedef long psched_tdiff_t; 61 60 62 61 #define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp)) 63 - #define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ)) 64 - #define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ)) 62 + #define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs) 63 + #define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay) 65 64 66 65 #else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ 67 66 ··· 124 123 default: \ 125 124 __delta = 0; \ 126 125 case 2: \ 127 - __delta += 1000000; \ 126 + __delta += USEC_PER_SEC; \ 128 127 case 1: \ 129 - __delta += 1000000; \ 128 + __delta += USEC_PER_SEC; \ 130 129 } \ 131 130 } \ 132 131 __delta; \ ··· 137 136 { 138 137 int delta; 139 138 140 - if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1) 139 + if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1) 141 140 return bound; 142 - delta = delta_sec * 1000000; 141 + delta = delta_sec * USEC_PER_SEC; 143 142 if (delta > bound || delta < 0) 144 143 delta = bound; 145 144 return delta; ··· 153 152 default: \ 154 153 __delta = psched_tod_diff(__delta_sec, bound); break; \ 155 154 case 2: \ 156 - __delta += 1000000; \ 155 + __delta += USEC_PER_SEC; \ 157 156 case 1: \ 158 - __delta += 1000000; \ 157 + __delta += USEC_PER_SEC; \ 159 158 case 0: \ 160 159 if (__delta > bound || __delta < 0) \ 161 160 __delta = bound; \ ··· 171 170 ({ \ 172 171 int __delta = (tv).tv_usec + (delta); \ 173 172 (tv_res).tv_sec = (tv).tv_sec; \ 174 - if (__delta > 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \ 173 + if (__delta > USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ 175 174 (tv_res).tv_usec = __delta; \ 176 175 }) 177 176 178 177 #define PSCHED_TADD(tv, delta) \ 179 178 ({ \ 180 179 (tv).tv_usec += (delta); \ 181 - if ((tv).tv_usec > 1000000) { (tv).tv_sec++; \ 182 - (tv).tv_usec -= 1000000; } \ 180 + if ((tv).tv_usec > USEC_PER_SEC) { (tv).tv_sec++; \ 181 + (tv).tv_usec -= USEC_PER_SEC; } \ 183 182 }) 184 183 185 184 /* Set/check that time is in the "past perfect";
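The old PSCHED_US2JIFFIE was an open-coded round-up division by the tick length in microseconds, which is the behaviour usecs_to_jiffies() provides. A standalone sketch with HZ fixed at 250 purely for illustration:

#include <stdio.h>

#define HZ 250                          /* illustrative tick rate */
#define USEC_PER_SEC 1000000L

/* The old PSCHED_US2JIFFIE: divide by the tick length, rounding up. */
static long us2jiffies(long usecs)
{
        long tick_us = USEC_PER_SEC / HZ;   /* 4000 us per jiffy at HZ=250 */

        return (usecs + tick_us - 1) / tick_us;
}

int main(void)
{
        /* 1 us still costs a whole jiffy; 4000 us is exactly one. */
        printf("%ld %ld %ld\n", us2jiffies(1), us2jiffies(4000),
               us2jiffies(4001));       /* prints: 1 1 2 */
        return 0;
}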
+3 -6
net/core/dev.c
··· 1092 1092 goto out; 1093 1093 } 1094 1094 1095 - if (offset > (int)skb->len) 1096 - BUG(); 1095 + BUG_ON(offset > (int)skb->len); 1097 1096 csum = skb_checksum(skb, offset, skb->len-offset, 0); 1098 1097 1099 1098 offset = skb->tail - skb->h.raw; 1100 - if (offset <= 0) 1101 - BUG(); 1102 - if (skb->csum + 2 > offset) 1103 - BUG(); 1099 + BUG_ON(offset <= 0); 1100 + BUG_ON(skb->csum + 2 > offset); 1104 1101 1105 1102 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); 1106 1103 skb->ip_summed = CHECKSUM_NONE;
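BUG_ON(cond) is simply the "if (cond) BUG();" pattern folded into one macro, so this conversion (and the matching ones in the files below) changes no behaviour; the checks just read as assertions. A userspace model of the macro, with abort() standing in for the kernel's BUG():

#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the kernel macro:
 *   #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 */
#define BUG_ON(condition)                                        \
        do {                                                     \
                if (condition) {                                 \
                        fprintf(stderr, "BUG at %s:%d\n",        \
                                __FILE__, __LINE__);             \
                        abort();                                 \
                }                                                \
        } while (0)

int main(void)
{
        int offset = 5, len = 10;

        BUG_ON(offset > len);   /* passes: condition is false */
        puts("ok");
        return 0;
}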
+5 -10
net/core/skbuff.c
··· 791 791 int end = offset + skb_shinfo(skb)->frags[i].size; 792 792 if (end > len) { 793 793 if (skb_cloned(skb)) { 794 - if (!realloc) 795 - BUG(); 794 + BUG_ON(!realloc); 796 795 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 797 796 return -ENOMEM; 798 797 } ··· 893 894 struct sk_buff *insp = NULL; 894 895 895 896 do { 896 - if (!list) 897 - BUG(); 897 + BUG_ON(!list); 898 898 899 899 if (list->len <= eat) { 900 900 /* Eaten as whole. */ ··· 1197 1199 start = end; 1198 1200 } 1199 1201 } 1200 - if (len) 1201 - BUG(); 1202 + BUG_ON(len); 1202 1203 1203 1204 return csum; 1204 1205 } ··· 1279 1282 start = end; 1280 1283 } 1281 1284 } 1282 - if (len) 1283 - BUG(); 1285 + BUG_ON(len); 1284 1286 return csum; 1285 1287 } 1286 1288 ··· 1293 1297 else 1294 1298 csstart = skb_headlen(skb); 1295 1299 1296 - if (csstart > skb_headlen(skb)) 1297 - BUG(); 1300 + BUG_ON(csstart > skb_headlen(skb)); 1298 1301 1299 1302 memcpy(to, skb->data, csstart); 1300 1303
+1 -2
net/ipv4/icmp.c
··· 899 899 u32 _mask, *mp; 900 900 901 901 mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); 902 - if (mp == NULL) 903 - BUG(); 902 + BUG_ON(mp == NULL); 904 903 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 905 904 if (*mp == ifa->ifa_mask && 906 905 inet_ifa_match(rt->rt_src, ifa))
+160 -83
net/ipv4/inet_diag.c
··· 50 50 #define INET_DIAG_PUT(skb, attrtype, attrlen) \ 51 51 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) 52 52 53 - static int inet_diag_fill(struct sk_buff *skb, struct sock *sk, 54 - int ext, u32 pid, u32 seq, u16 nlmsg_flags, 55 - const struct nlmsghdr *unlh) 53 + static int inet_csk_diag_fill(struct sock *sk, 54 + struct sk_buff *skb, 55 + int ext, u32 pid, u32 seq, u16 nlmsg_flags, 56 + const struct nlmsghdr *unlh) 56 57 { 57 58 const struct inet_sock *inet = inet_sk(sk); 58 59 const struct inet_connection_sock *icsk = inet_csk(sk); ··· 71 70 nlh->nlmsg_flags = nlmsg_flags; 72 71 73 72 r = NLMSG_DATA(nlh); 74 - if (sk->sk_state != TCP_TIME_WAIT) { 75 - if (ext & (1 << (INET_DIAG_MEMINFO - 1))) 76 - minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, 77 - sizeof(*minfo)); 78 - if (ext & (1 << (INET_DIAG_INFO - 1))) 79 - info = INET_DIAG_PUT(skb, INET_DIAG_INFO, 80 - handler->idiag_info_size); 81 - 82 - if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { 83 - size_t len = strlen(icsk->icsk_ca_ops->name); 84 - strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), 85 - icsk->icsk_ca_ops->name); 86 - } 73 + BUG_ON(sk->sk_state == TCP_TIME_WAIT); 74 + 75 + if (ext & (1 << (INET_DIAG_MEMINFO - 1))) 76 + minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo)); 77 + 78 + if (ext & (1 << (INET_DIAG_INFO - 1))) 79 + info = INET_DIAG_PUT(skb, INET_DIAG_INFO, 80 + handler->idiag_info_size); 81 + 82 + if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { 83 + const size_t len = strlen(icsk->icsk_ca_ops->name); 84 + 85 + strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), 86 + icsk->icsk_ca_ops->name); 87 87 } 88 + 88 89 r->idiag_family = sk->sk_family; 89 90 r->idiag_state = sk->sk_state; 90 91 r->idiag_timer = 0; ··· 95 92 r->id.idiag_if = sk->sk_bound_dev_if; 96 93 r->id.idiag_cookie[0] = (u32)(unsigned long)sk; 97 94 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); 98 - 99 - if (r->idiag_state == TCP_TIME_WAIT) { 100 - const struct inet_timewait_sock *tw = inet_twsk(sk); 101 - long tmo = tw->tw_ttd - jiffies; 102 - if (tmo < 0) 103 - tmo = 0; 104 - 105 - r->id.idiag_sport = tw->tw_sport; 106 - r->id.idiag_dport = tw->tw_dport; 107 - r->id.idiag_src[0] = tw->tw_rcv_saddr; 108 - r->id.idiag_dst[0] = tw->tw_daddr; 109 - r->idiag_state = tw->tw_substate; 110 - r->idiag_timer = 3; 111 - r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ; 112 - r->idiag_rqueue = 0; 113 - r->idiag_wqueue = 0; 114 - r->idiag_uid = 0; 115 - r->idiag_inode = 0; 116 - #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 117 - if (r->idiag_family == AF_INET6) { 118 - const struct inet6_timewait_sock *tw6 = inet6_twsk(sk); 119 - 120 - ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, 121 - &tw6->tw_v6_rcv_saddr); 122 - ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, 123 - &tw6->tw_v6_daddr); 124 - } 125 - #endif 126 - nlh->nlmsg_len = skb->tail - b; 127 - return skb->len; 128 - } 129 95 130 96 r->id.idiag_sport = inet->sport; 131 97 r->id.idiag_dport = inet->dport; ··· 157 185 return -1; 158 186 } 159 187 160 - static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh) 188 + static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, 189 + struct sk_buff *skb, int ext, u32 pid, 190 + u32 seq, u16 nlmsg_flags, 191 + const struct nlmsghdr *unlh) 192 + { 193 + long tmo; 194 + struct inet_diag_msg *r; 195 + const unsigned char *previous_tail = skb->tail; 196 + struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, 197 + unlh->nlmsg_type, sizeof(*r)); 198 + 199 + r = 
NLMSG_DATA(nlh); 200 + BUG_ON(tw->tw_state != TCP_TIME_WAIT); 201 + 202 + nlh->nlmsg_flags = nlmsg_flags; 203 + 204 + tmo = tw->tw_ttd - jiffies; 205 + if (tmo < 0) 206 + tmo = 0; 207 + 208 + r->idiag_family = tw->tw_family; 209 + r->idiag_state = tw->tw_state; 210 + r->idiag_timer = 0; 211 + r->idiag_retrans = 0; 212 + r->id.idiag_if = tw->tw_bound_dev_if; 213 + r->id.idiag_cookie[0] = (u32)(unsigned long)tw; 214 + r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); 215 + r->id.idiag_sport = tw->tw_sport; 216 + r->id.idiag_dport = tw->tw_dport; 217 + r->id.idiag_src[0] = tw->tw_rcv_saddr; 218 + r->id.idiag_dst[0] = tw->tw_daddr; 219 + r->idiag_state = tw->tw_substate; 220 + r->idiag_timer = 3; 221 + r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ; 222 + r->idiag_rqueue = 0; 223 + r->idiag_wqueue = 0; 224 + r->idiag_uid = 0; 225 + r->idiag_inode = 0; 226 + #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 227 + if (tw->tw_family == AF_INET6) { 228 + const struct inet6_timewait_sock *tw6 = 229 + inet6_twsk((struct sock *)tw); 230 + 231 + ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, 232 + &tw6->tw_v6_rcv_saddr); 233 + ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, 234 + &tw6->tw_v6_daddr); 235 + } 236 + #endif 237 + nlh->nlmsg_len = skb->tail - previous_tail; 238 + return skb->len; 239 + nlmsg_failure: 240 + skb_trim(skb, previous_tail - skb->data); 241 + return -1; 242 + } 243 + 244 + static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 245 + int ext, u32 pid, u32 seq, u16 nlmsg_flags, 246 + const struct nlmsghdr *unlh) 247 + { 248 + if (sk->sk_state == TCP_TIME_WAIT) 249 + return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, 250 + skb, ext, pid, seq, nlmsg_flags, 251 + unlh); 252 + return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh); 253 + } 254 + 255 + static int inet_diag_get_exact(struct sk_buff *in_skb, 256 + const struct nlmsghdr *nlh) 161 257 { 162 258 int err; 163 259 struct sock *sk; ··· 275 235 if (!rep) 276 236 goto out; 277 237 278 - if (inet_diag_fill(rep, sk, req->idiag_ext, 238 + if (sk_diag_fill(sk, rep, req->idiag_ext, 279 239 NETLINK_CB(in_skb).pid, 280 240 nlh->nlmsg_seq, 0, nlh) <= 0) 281 241 BUG(); ··· 323 283 324 284 325 285 static int inet_diag_bc_run(const void *bc, int len, 326 - const struct inet_diag_entry *entry) 286 + const struct inet_diag_entry *entry) 327 287 { 328 288 while (len > 0) { 329 289 int yes = 1; ··· 362 322 yes = 0; 363 323 break; 364 324 } 365 - 325 + 366 326 if (cond->prefix_len == 0) 367 327 break; 368 328 ··· 371 331 else 372 332 addr = entry->daddr; 373 333 374 - if (bitstring_match(addr, cond->addr, cond->prefix_len)) 334 + if (bitstring_match(addr, cond->addr, 335 + cond->prefix_len)) 375 336 break; 376 337 if (entry->family == AF_INET6 && 377 338 cond->family == AF_INET) { ··· 387 346 } 388 347 } 389 348 390 - if (yes) { 349 + if (yes) { 391 350 len -= op->yes; 392 351 bc += op->yes; 393 352 } else { ··· 448 407 default: 449 408 return -EINVAL; 450 409 } 451 - bc += op->yes; 410 + bc += op->yes; 452 411 len -= op->yes; 453 412 } 454 413 return len == 0 ? 
0 : -EINVAL; 455 414 } 456 415 457 - static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk, 458 - struct netlink_callback *cb) 416 + static int inet_csk_diag_dump(struct sock *sk, 417 + struct sk_buff *skb, 418 + struct netlink_callback *cb) 459 419 { 460 420 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 461 421 ··· 486 444 return 0; 487 445 } 488 446 489 - return inet_diag_fill(skb, sk, r->idiag_ext, NETLINK_CB(cb->skb).pid, 490 - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 447 + return inet_csk_diag_fill(sk, skb, r->idiag_ext, 448 + NETLINK_CB(cb->skb).pid, 449 + cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 450 + } 451 + 452 + static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, 453 + struct sk_buff *skb, 454 + struct netlink_callback *cb) 455 + { 456 + struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 457 + 458 + if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 459 + struct inet_diag_entry entry; 460 + struct rtattr *bc = (struct rtattr *)(r + 1); 461 + 462 + entry.family = tw->tw_family; 463 + #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 464 + if (tw->tw_family == AF_INET6) { 465 + struct inet6_timewait_sock *tw6 = 466 + inet6_twsk((struct sock *)tw); 467 + entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32; 468 + entry.daddr = tw6->tw_v6_daddr.s6_addr32; 469 + } else 470 + #endif 471 + { 472 + entry.saddr = &tw->tw_rcv_saddr; 473 + entry.daddr = &tw->tw_daddr; 474 + } 475 + entry.sport = tw->tw_num; 476 + entry.dport = ntohs(tw->tw_dport); 477 + entry.userlocks = 0; 478 + 479 + if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 480 + return 0; 481 + } 482 + 483 + return inet_twsk_diag_fill(tw, skb, r->idiag_ext, 484 + NETLINK_CB(cb->skb).pid, 485 + cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 491 486 } 492 487 493 488 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, 494 - struct request_sock *req, 495 - u32 pid, u32 seq, 496 - const struct nlmsghdr *unlh) 489 + struct request_sock *req, u32 pid, u32 seq, 490 + const struct nlmsghdr *unlh) 497 491 { 498 492 const struct inet_request_sock *ireq = inet_rsk(req); 499 493 struct inet_sock *inet = inet_sk(sk); ··· 582 504 } 583 505 584 506 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, 585 - struct netlink_callback *cb) 507 + struct netlink_callback *cb) 586 508 { 587 509 struct inet_diag_entry entry; 588 510 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); ··· 634 556 inet6_rsk(req)->loc_addr.s6_addr32 : 635 557 #endif 636 558 &ireq->loc_addr; 637 - entry.daddr = 559 + entry.daddr = 638 560 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 639 561 (entry.family == AF_INET6) ? 
640 562 inet6_rsk(req)->rmt_addr.s6_addr32 : ··· 677 599 handler = inet_diag_table[cb->nlh->nlmsg_type]; 678 600 BUG_ON(handler == NULL); 679 601 hashinfo = handler->idiag_hashinfo; 680 - 602 + 681 603 s_i = cb->args[1]; 682 604 s_num = num = cb->args[2]; 683 605 ··· 708 630 cb->args[3] > 0) 709 631 goto syn_recv; 710 632 711 - if (inet_diag_dump_sock(skb, sk, cb) < 0) { 633 + if (inet_csk_diag_dump(sk, skb, cb) < 0) { 712 634 inet_listen_unlock(hashinfo); 713 635 goto done; 714 636 } ··· 750 672 s_num = 0; 751 673 752 674 read_lock_bh(&head->lock); 753 - 754 675 num = 0; 755 676 sk_for_each(sk, node, &head->chain) { 756 677 struct inet_sock *inet = inet_sk(sk); ··· 761 684 if (r->id.idiag_sport != inet->sport && 762 685 r->id.idiag_sport) 763 686 goto next_normal; 764 - if (r->id.idiag_dport != inet->dport && r->id.idiag_dport) 687 + if (r->id.idiag_dport != inet->dport && 688 + r->id.idiag_dport) 765 689 goto next_normal; 766 - if (inet_diag_dump_sock(skb, sk, cb) < 0) { 690 + if (inet_csk_diag_dump(sk, skb, cb) < 0) { 767 691 read_unlock_bh(&head->lock); 768 692 goto done; 769 693 } ··· 773 695 } 774 696 775 697 if (r->idiag_states & TCPF_TIME_WAIT) { 776 - sk_for_each(sk, node, 698 + struct inet_timewait_sock *tw; 699 + 700 + inet_twsk_for_each(tw, node, 777 701 &hashinfo->ehash[i + hashinfo->ehash_size].chain) { 778 - struct inet_sock *inet = inet_sk(sk); 779 702 780 703 if (num < s_num) 781 704 goto next_dying; 782 - if (r->id.idiag_sport != inet->sport && 705 + if (r->id.idiag_sport != tw->tw_sport && 783 706 r->id.idiag_sport) 784 707 goto next_dying; 785 - if (r->id.idiag_dport != inet->dport && 708 + if (r->id.idiag_dport != tw->tw_dport && 786 709 r->id.idiag_dport) 787 710 goto next_dying; 788 - if (inet_diag_dump_sock(skb, sk, cb) < 0) { 711 + if (inet_twsk_diag_dump(tw, skb, cb) < 0) { 789 712 read_unlock_bh(&head->lock); 790 713 goto done; 791 714 } ··· 803 724 return skb->len; 804 725 } 805 726 806 - static __inline__ int 807 - inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 727 + static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 808 728 { 809 729 if (!(nlh->nlmsg_flags&NLM_F_REQUEST)) 810 730 return 0; ··· 833 755 } 834 756 return netlink_dump_start(idiagnl, skb, nlh, 835 757 inet_diag_dump, NULL); 836 - } else { 758 + } else 837 759 return inet_diag_get_exact(skb, nlh); 838 - } 839 760 840 761 err_inval: 841 762 return -EINVAL; ··· 843 766 844 767 static inline void inet_diag_rcv_skb(struct sk_buff *skb) 845 768 { 846 - int err; 847 - struct nlmsghdr * nlh; 848 - 849 769 if (skb->len >= NLMSG_SPACE(0)) { 850 - nlh = (struct nlmsghdr *)skb->data; 851 - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 770 + int err; 771 + struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; 772 + 773 + if (nlh->nlmsg_len < sizeof(*nlh) || 774 + skb->len < nlh->nlmsg_len) 852 775 return; 853 776 err = inet_diag_rcv_msg(skb, nlh); 854 - if (err || nlh->nlmsg_flags & NLM_F_ACK) 777 + if (err || nlh->nlmsg_flags & NLM_F_ACK) 855 778 netlink_ack(skb, nlh, err); 856 779 } 857 780 }
+2 -4
net/ipv4/inetpeer.c
··· 304 304 /* look for a node to insert instead of p */ 305 305 struct inet_peer *t; 306 306 t = lookup_rightempty(p); 307 - if (*stackptr[-1] != t) 308 - BUG(); 307 + BUG_ON(*stackptr[-1] != t); 309 308 **--stackptr = t->avl_left; 310 309 /* t is removed, t->v4daddr > x->v4daddr for any 311 310 * x in p->avl_left subtree. ··· 313 314 t->avl_left = p->avl_left; 314 315 t->avl_right = p->avl_right; 315 316 t->avl_height = p->avl_height; 316 - if (delp[1] != &p->avl_left) 317 - BUG(); 317 + BUG_ON(delp[1] != &p->avl_left); 318 318 delp[1] = &t->avl_left; /* was &p->avl_left */ 319 319 } 320 320 peer_avl_rebalance(stack, stackptr);
+15 -18
net/ipv4/ip_gre.c
··· 188 188 } 189 189 190 190 if (ipgre_fb_tunnel_dev->flags&IFF_UP) 191 - return ipgre_fb_tunnel_dev->priv; 191 + return netdev_priv(ipgre_fb_tunnel_dev); 192 192 return NULL; 193 193 } 194 194 ··· 278 278 return NULL; 279 279 280 280 dev->init = ipgre_tunnel_init; 281 - nt = dev->priv; 281 + nt = netdev_priv(dev); 282 282 nt->parms = *parms; 283 283 284 284 if (register_netdevice(dev) < 0) { 285 285 free_netdev(dev); 286 286 goto failed; 287 287 } 288 - 289 - nt = dev->priv; 290 - nt->parms = *parms; 291 288 292 289 dev_hold(dev); 293 290 ipgre_tunnel_link(nt); ··· 296 299 297 300 static void ipgre_tunnel_uninit(struct net_device *dev) 298 301 { 299 - ipgre_tunnel_unlink((struct ip_tunnel*)dev->priv); 302 + ipgre_tunnel_unlink(netdev_priv(dev)); 300 303 dev_put(dev); 301 304 } 302 305 ··· 515 518 skb2->dst->ops->update_pmtu(skb2->dst, rel_info); 516 519 rel_info = htonl(rel_info); 517 520 } else if (type == ICMP_TIME_EXCEEDED) { 518 - struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; 521 + struct ip_tunnel *t = netdev_priv(skb2->dev); 519 522 if (t->parms.iph.ttl) { 520 523 rel_type = ICMP_DEST_UNREACH; 521 524 rel_code = ICMP_HOST_UNREACH; ··· 666 669 667 670 static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 668 671 { 669 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 672 + struct ip_tunnel *tunnel = netdev_priv(dev); 670 673 struct net_device_stats *stats = &tunnel->stat; 671 674 struct iphdr *old_iph = skb->nh.iph; 672 675 struct iphdr *tiph; ··· 912 915 t = ipgre_tunnel_locate(&p, 0); 913 916 } 914 917 if (t == NULL) 915 - t = (struct ip_tunnel*)dev->priv; 918 + t = netdev_priv(dev); 916 919 memcpy(&p, &t->parms, sizeof(p)); 917 920 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 918 921 err = -EFAULT; ··· 952 955 } else { 953 956 unsigned nflags=0; 954 957 955 - t = (struct ip_tunnel*)dev->priv; 958 + t = netdev_priv(dev); 956 959 957 960 if (MULTICAST(p.iph.daddr)) 958 961 nflags = IFF_BROADCAST; ··· 1001 1004 if ((t = ipgre_tunnel_locate(&p, 0)) == NULL) 1002 1005 goto done; 1003 1006 err = -EPERM; 1004 - if (t == ipgre_fb_tunnel_dev->priv) 1007 + if (t == netdev_priv(ipgre_fb_tunnel_dev)) 1005 1008 goto done; 1006 1009 dev = t->dev; 1007 1010 } ··· 1018 1021 1019 1022 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev) 1020 1023 { 1021 - return &(((struct ip_tunnel*)dev->priv)->stat); 1024 + return &(((struct ip_tunnel*)netdev_priv(dev))->stat); 1022 1025 } 1023 1026 1024 1027 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 1025 1028 { 1026 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 1029 + struct ip_tunnel *tunnel = netdev_priv(dev); 1027 1030 if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen) 1028 1031 return -EINVAL; 1029 1032 dev->mtu = new_mtu; ··· 1063 1066 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, 1064 1067 void *daddr, void *saddr, unsigned len) 1065 1068 { 1066 - struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; 1069 + struct ip_tunnel *t = netdev_priv(dev); 1067 1070 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 1068 1071 u16 *p = (u16*)(iph+1); 1069 1072 ··· 1090 1093 1091 1094 static int ipgre_open(struct net_device *dev) 1092 1095 { 1093 - struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; 1096 + struct ip_tunnel *t = netdev_priv(dev); 1094 1097 1095 1098 if (MULTICAST(t->parms.iph.daddr)) { 1096 1099 struct flowi fl = { .oif = t->parms.link, ··· 1114 1117 1115 1118 static int 
ipgre_close(struct net_device *dev) 1116 1119 { 1117 - struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; 1120 + struct ip_tunnel *t = netdev_priv(dev); 1118 1121 if (MULTICAST(t->parms.iph.daddr) && t->mlink) { 1119 1122 struct in_device *in_dev = inetdev_by_index(t->mlink); 1120 1123 if (in_dev) { ··· 1154 1157 int mtu = ETH_DATA_LEN; 1155 1158 int addend = sizeof(struct iphdr) + 4; 1156 1159 1157 - tunnel = (struct ip_tunnel*)dev->priv; 1160 + tunnel = netdev_priv(dev); 1158 1161 iph = &tunnel->parms.iph; 1159 1162 1160 1163 tunnel->dev = dev; ··· 1218 1221 1219 1222 static int __init ipgre_fb_tunnel_init(struct net_device *dev) 1220 1223 { 1221 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 1224 + struct ip_tunnel *tunnel = netdev_priv(dev); 1222 1225 struct iphdr *iph = &tunnel->parms.iph; 1223 1226 1224 1227 tunnel->dev = dev;
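netdev_priv(dev) replaces the dev->priv pointer chase throughout these tunnel drivers: with alloc_netdev() the private area lives in the same allocation, directly after the (aligned) struct net_device, so the accessor reduces to pointer arithmetic. A simplified userspace model of that layout; the fake_* names are stand-ins, and alignment padding is omitted:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the real kernel structures. */
struct fake_net_device { char name[16]; /* ... */ };
struct fake_ip_tunnel  { int mlink; /* ... */ };

/* Model of netdev_priv(): the private data sits right after the
 * device structure in one allocation. */
static void *fake_netdev_priv(struct fake_net_device *dev)
{
        return (char *)dev + sizeof(struct fake_net_device);
}

int main(void)
{
        /* Model of alloc_netdev(sizeof(priv), ...): one block for both. */
        struct fake_net_device *dev =
                malloc(sizeof(*dev) + sizeof(struct fake_ip_tunnel));
        struct fake_ip_tunnel *t;

        if (!dev)
                return 1;
        t = fake_netdev_priv(dev);
        t->mlink = 42;
        printf("priv at %p, mlink=%d\n", (void *)t, t->mlink);
        free(dev);
        return 0;
}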
+1
net/ipv4/ip_output.c
··· 69 69 #include <net/ip.h> 70 70 #include <net/protocol.h> 71 71 #include <net/route.h> 72 + #include <net/xfrm.h> 72 73 #include <linux/skbuff.h> 73 74 #include <net/sock.h> 74 75 #include <net/arp.h>
+9 -9
net/ipv4/ipip.c
··· 244 244 if (dev == NULL) 245 245 return NULL; 246 246 247 - nt = dev->priv; 247 + nt = netdev_priv(dev); 248 248 SET_MODULE_OWNER(dev); 249 249 dev->init = ipip_tunnel_init; 250 250 nt->parms = *parms; ··· 269 269 tunnels_wc[0] = NULL; 270 270 write_unlock_bh(&ipip_lock); 271 271 } else 272 - ipip_tunnel_unlink((struct ip_tunnel*)dev->priv); 272 + ipip_tunnel_unlink(netdev_priv(dev)); 273 273 dev_put(dev); 274 274 } 275 275 ··· 443 443 skb2->dst->ops->update_pmtu(skb2->dst, rel_info); 444 444 rel_info = htonl(rel_info); 445 445 } else if (type == ICMP_TIME_EXCEEDED) { 446 - struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; 446 + struct ip_tunnel *t = netdev_priv(skb2->dev); 447 447 if (t->parms.iph.ttl) { 448 448 rel_type = ICMP_DEST_UNREACH; 449 449 rel_code = ICMP_HOST_UNREACH; ··· 514 514 515 515 static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 516 516 { 517 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 517 + struct ip_tunnel *tunnel = netdev_priv(dev); 518 518 struct net_device_stats *stats = &tunnel->stat; 519 519 struct iphdr *tiph = &tunnel->parms.iph; 520 520 u8 tos = tunnel->parms.iph.tos; ··· 674 674 t = ipip_tunnel_locate(&p, 0); 675 675 } 676 676 if (t == NULL) 677 - t = (struct ip_tunnel*)dev->priv; 677 + t = netdev_priv(dev); 678 678 memcpy(&p, &t->parms, sizeof(p)); 679 679 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 680 680 err = -EFAULT; ··· 711 711 err = -EINVAL; 712 712 break; 713 713 } 714 - t = (struct ip_tunnel*)dev->priv; 714 + t = netdev_priv(dev); 715 715 ipip_tunnel_unlink(t); 716 716 t->parms.iph.saddr = p.iph.saddr; 717 717 t->parms.iph.daddr = p.iph.daddr; ··· 765 765 766 766 static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev) 767 767 { 768 - return &(((struct ip_tunnel*)dev->priv)->stat); 768 + return &(((struct ip_tunnel*)netdev_priv(dev))->stat); 769 769 } 770 770 771 771 static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) ··· 800 800 struct ip_tunnel *tunnel; 801 801 struct iphdr *iph; 802 802 803 - tunnel = (struct ip_tunnel*)dev->priv; 803 + tunnel = netdev_priv(dev); 804 804 iph = &tunnel->parms.iph; 805 805 806 806 tunnel->dev = dev; ··· 838 838 839 839 static int __init ipip_fb_tunnel_init(struct net_device *dev) 840 840 { 841 - struct ip_tunnel *tunnel = dev->priv; 841 + struct ip_tunnel *tunnel = netdev_priv(dev); 842 842 struct iphdr *iph = &tunnel->parms.iph; 843 843 844 844 tunnel->dev = dev;
+11 -11
net/ipv4/ipmr.c
··· 178 178 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 179 179 { 180 180 read_lock(&mrt_lock); 181 - ((struct net_device_stats*)dev->priv)->tx_bytes += skb->len; 182 - ((struct net_device_stats*)dev->priv)->tx_packets++; 181 + ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len; 182 + ((struct net_device_stats*)netdev_priv(dev))->tx_packets++; 183 183 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); 184 184 read_unlock(&mrt_lock); 185 185 kfree_skb(skb); ··· 188 188 189 189 static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) 190 190 { 191 - return (struct net_device_stats*)dev->priv; 191 + return (struct net_device_stats*)netdev_priv(dev); 192 192 } 193 193 194 194 static void reg_vif_setup(struct net_device *dev) ··· 1149 1149 if (vif->flags & VIFF_REGISTER) { 1150 1150 vif->pkt_out++; 1151 1151 vif->bytes_out+=skb->len; 1152 - ((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len; 1153 - ((struct net_device_stats*)vif->dev->priv)->tx_packets++; 1152 + ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len; 1153 + ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++; 1154 1154 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); 1155 1155 kfree_skb(skb); 1156 1156 return; ··· 1210 1210 if (vif->flags & VIFF_TUNNEL) { 1211 1211 ip_encap(skb, vif->local, vif->remote); 1212 1212 /* FIXME: extra output firewall step used to be here. --RR */ 1213 - ((struct ip_tunnel *)vif->dev->priv)->stat.tx_packets++; 1214 - ((struct ip_tunnel *)vif->dev->priv)->stat.tx_bytes+=skb->len; 1213 + ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++; 1214 + ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len; 1215 1215 } 1216 1216 1217 1217 IPCB(skb)->flags |= IPSKB_FORWARDED; ··· 1467 1467 skb->pkt_type = PACKET_HOST; 1468 1468 dst_release(skb->dst); 1469 1469 skb->dst = NULL; 1470 - ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; 1471 - ((struct net_device_stats*)reg_dev->priv)->rx_packets++; 1470 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1471 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1472 1472 nf_reset(skb); 1473 1473 netif_rx(skb); 1474 1474 dev_put(reg_dev); ··· 1522 1522 skb->ip_summed = 0; 1523 1523 skb->pkt_type = PACKET_HOST; 1524 1524 dst_release(skb->dst); 1525 - ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; 1526 - ((struct net_device_stats*)reg_dev->priv)->rx_packets++; 1525 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1526 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1527 1527 skb->dst = NULL; 1528 1528 nf_reset(skb); 1529 1529 netif_rx(skb);
+1 -1
net/ipv4/tcp_input.c
··· 3347 3347 int offset = start - TCP_SKB_CB(skb)->seq; 3348 3348 int size = TCP_SKB_CB(skb)->end_seq - start; 3349 3349 3350 - if (offset < 0) BUG(); 3350 + BUG_ON(offset < 0); 3351 3351 if (size > 0) { 3352 3352 size = min(copy, size); 3353 3353 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
+4
net/ipv6/ip6_output.c
··· 226 226 ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); 227 227 ipv6_addr_copy(&hdr->daddr, first_hop); 228 228 229 + skb->priority = sk->sk_priority; 230 + 229 231 mtu = dst_mtu(dst); 230 232 if ((skb->len <= mtu) || ipfragok) { 231 233 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); ··· 1183 1181 hdr->nexthdr = proto; 1184 1182 ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); 1185 1183 ipv6_addr_copy(&hdr->daddr, final_dst); 1184 + 1185 + skb->priority = sk->sk_priority; 1186 1186 1187 1187 skb->dst = dst_clone(&rt->u.dst); 1188 1188 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+12 -12
net/ipv6/ip6_tunnel.c
··· 243 243 if (dev == NULL) 244 244 return -ENOMEM; 245 245 246 - t = dev->priv; 246 + t = netdev_priv(dev); 247 247 dev->init = ip6ip6_tnl_dev_init; 248 248 t->parms = *p; 249 249 ··· 308 308 static void 309 309 ip6ip6_tnl_dev_uninit(struct net_device *dev) 310 310 { 311 - struct ip6_tnl *t = dev->priv; 311 + struct ip6_tnl *t = netdev_priv(dev); 312 312 313 313 if (dev == ip6ip6_fb_tnl_dev) { 314 314 write_lock_bh(&ip6ip6_lock); ··· 623 623 static int 624 624 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 625 625 { 626 - struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; 626 + struct ip6_tnl *t = netdev_priv(dev); 627 627 struct net_device_stats *stats = &t->stat; 628 628 struct ipv6hdr *ipv6h = skb->nh.ipv6h; 629 629 struct ipv6_txoptions *opt = NULL; ··· 933 933 break; 934 934 } 935 935 if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV) 936 - t = (struct ip6_tnl *) dev->priv; 936 + t = netdev_priv(dev); 937 937 else if (err) 938 938 break; 939 939 } else 940 - t = (struct ip6_tnl *) dev->priv; 940 + t = netdev_priv(dev); 941 941 942 942 memcpy(&p, &t->parms, sizeof (p)); 943 943 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { ··· 955 955 break; 956 956 } 957 957 if (!create && dev != ip6ip6_fb_tnl_dev) { 958 - t = (struct ip6_tnl *) dev->priv; 958 + t = netdev_priv(dev); 959 959 } 960 960 if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) { 961 961 break; ··· 991 991 err = ip6ip6_tnl_locate(&p, &t, 0); 992 992 if (err) 993 993 break; 994 - if (t == ip6ip6_fb_tnl_dev->priv) { 994 + if (t == netdev_priv(ip6ip6_fb_tnl_dev)) { 995 995 err = -EPERM; 996 996 break; 997 997 } 998 998 } else { 999 - t = (struct ip6_tnl *) dev->priv; 999 + t = netdev_priv(dev); 1000 1000 } 1001 1001 err = unregister_netdevice(t->dev); 1002 1002 break; ··· 1016 1016 static struct net_device_stats * 1017 1017 ip6ip6_tnl_get_stats(struct net_device *dev) 1018 1018 { 1019 - return &(((struct ip6_tnl *) dev->priv)->stat); 1019 + return &(((struct ip6_tnl *)netdev_priv(dev))->stat); 1020 1020 } 1021 1021 1022 1022 /** ··· 1073 1073 static inline void 1074 1074 ip6ip6_tnl_dev_init_gen(struct net_device *dev) 1075 1075 { 1076 - struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; 1076 + struct ip6_tnl *t = netdev_priv(dev); 1077 1077 t->fl.proto = IPPROTO_IPV6; 1078 1078 t->dev = dev; 1079 1079 strcpy(t->parms.name, dev->name); ··· 1087 1087 static int 1088 1088 ip6ip6_tnl_dev_init(struct net_device *dev) 1089 1089 { 1090 - struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; 1090 + struct ip6_tnl *t = netdev_priv(dev); 1091 1091 ip6ip6_tnl_dev_init_gen(dev); 1092 1092 ip6ip6_tnl_link_config(t); 1093 1093 return 0; ··· 1103 1103 static int 1104 1104 ip6ip6_fb_tnl_dev_init(struct net_device *dev) 1105 1105 { 1106 - struct ip6_tnl *t = dev->priv; 1106 + struct ip6_tnl *t = netdev_priv(dev); 1107 1107 ip6ip6_tnl_dev_init_gen(dev); 1108 1108 dev_hold(dev); 1109 1109 tnls_wc[0] = t;
+10 -10
net/ipv6/sit.c
··· 184 184 if (dev == NULL) 185 185 return NULL; 186 186 187 - nt = dev->priv; 187 + nt = netdev_priv(dev); 188 188 dev->init = ipip6_tunnel_init; 189 189 nt->parms = *parms; 190 190 ··· 210 210 write_unlock_bh(&ipip6_lock); 211 211 dev_put(dev); 212 212 } else { 213 - ipip6_tunnel_unlink((struct ip_tunnel*)dev->priv); 213 + ipip6_tunnel_unlink(netdev_priv(dev)); 214 214 dev_put(dev); 215 215 } 216 216 } ··· 346 346 rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); 347 347 348 348 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { 349 - struct ip_tunnel * t = (struct ip_tunnel*)rt6i->rt6i_dev->priv; 349 + struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); 350 350 if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) { 351 351 rel_type = ICMPV6_DEST_UNREACH; 352 352 rel_code = ICMPV6_ADDR_UNREACH; ··· 424 424 425 425 static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 426 426 { 427 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 427 + struct ip_tunnel *tunnel = netdev_priv(dev); 428 428 struct net_device_stats *stats = &tunnel->stat; 429 429 struct iphdr *tiph = &tunnel->parms.iph; 430 430 struct ipv6hdr *iph6 = skb->nh.ipv6h; ··· 610 610 t = ipip6_tunnel_locate(&p, 0); 611 611 } 612 612 if (t == NULL) 613 - t = (struct ip_tunnel*)dev->priv; 613 + t = netdev_priv(dev); 614 614 memcpy(&p, &t->parms, sizeof(p)); 615 615 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 616 616 err = -EFAULT; ··· 647 647 err = -EINVAL; 648 648 break; 649 649 } 650 - t = (struct ip_tunnel*)dev->priv; 650 + t = netdev_priv(dev); 651 651 ipip6_tunnel_unlink(t); 652 652 t->parms.iph.saddr = p.iph.saddr; 653 653 t->parms.iph.daddr = p.iph.daddr; ··· 683 683 if ((t = ipip6_tunnel_locate(&p, 0)) == NULL) 684 684 goto done; 685 685 err = -EPERM; 686 - if (t == ipip6_fb_tunnel_dev->priv) 686 + if (t == netdev_priv(ipip6_fb_tunnel_dev)) 687 687 goto done; 688 688 dev = t->dev; 689 689 } ··· 700 700 701 701 static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev) 702 702 { 703 - return &(((struct ip_tunnel*)dev->priv)->stat); 703 + return &(((struct ip_tunnel*)netdev_priv(dev))->stat); 704 704 } 705 705 706 706 static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) ··· 735 735 struct ip_tunnel *tunnel; 736 736 struct iphdr *iph; 737 737 738 - tunnel = (struct ip_tunnel*)dev->priv; 738 + tunnel = netdev_priv(dev); 739 739 iph = &tunnel->parms.iph; 740 740 741 741 tunnel->dev = dev; ··· 775 775 776 776 static int __init ipip6_fb_tunnel_init(struct net_device *dev) 777 777 { 778 - struct ip_tunnel *tunnel = dev->priv; 778 + struct ip_tunnel *tunnel = netdev_priv(dev); 779 779 struct iphdr *iph = &tunnel->parms.iph; 780 780 781 781 tunnel->dev = dev;
+1 -2
net/key/af_key.c
··· 297 297 err = EINTR; 298 298 if (err >= 512) 299 299 err = EINVAL; 300 - if (err <= 0 || err >= 256) 301 - BUG(); 300 + BUG_ON(err <= 0 || err >= 256); 302 301 303 302 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); 304 303 pfkey_hdr_dup(hdr, orig);
+7 -7
net/sched/Makefile
··· 7 7 obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o 8 8 obj-$(CONFIG_NET_CLS) += cls_api.o 9 9 obj-$(CONFIG_NET_CLS_ACT) += act_api.o 10 - obj-$(CONFIG_NET_ACT_POLICE) += police.o 11 - obj-$(CONFIG_NET_CLS_POLICE) += police.o 12 - obj-$(CONFIG_NET_ACT_GACT) += gact.o 13 - obj-$(CONFIG_NET_ACT_MIRRED) += mirred.o 14 - obj-$(CONFIG_NET_ACT_IPT) += ipt.o 15 - obj-$(CONFIG_NET_ACT_PEDIT) += pedit.o 16 - obj-$(CONFIG_NET_ACT_SIMP) += simple.o 10 + obj-$(CONFIG_NET_ACT_POLICE) += act_police.o 11 + obj-$(CONFIG_NET_CLS_POLICE) += act_police.o 12 + obj-$(CONFIG_NET_ACT_GACT) += act_gact.o 13 + obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o 14 + obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o 15 + obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o 16 + obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o 17 17 obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o 18 18 obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o 19 19 obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
+2 -2
net/sched/act_api.c
··· 165 165 while ((a = act) != NULL) { 166 166 repeat: 167 167 if (a->ops && a->ops->act) { 168 - ret = a->ops->act(&skb, a, res); 168 + ret = a->ops->act(skb, a, res); 169 169 if (TC_MUNGED & skb->tc_verd) { 170 170 /* copied already, allow trampling */ 171 171 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); ··· 290 290 if (a_o == NULL) { 291 291 #ifdef CONFIG_KMOD 292 292 rtnl_unlock(); 293 - request_module(act_name); 293 + request_module("act_%s", act_name); 294 294 rtnl_lock(); 295 295 296 296 a_o = tc_lookup_action_n(act_name);
+1 -2
net/sched/gact.c net/sched/act_gact.c
··· 135 135 } 136 136 137 137 static int 138 - tcf_gact(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 138 + tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 139 139 { 140 140 struct tcf_gact *p = PRIV(a, gact); 141 - struct sk_buff *skb = *pskb; 142 141 int action = TC_ACT_SHOT; 143 142 144 143 spin_lock(&p->lock);
+4 -2
net/sched/ipt.c net/sched/act_ipt.c
··· 201 201 } 202 202 203 203 static int 204 - tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 204 + tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 205 205 { 206 206 int ret = 0, result = 0; 207 207 struct tcf_ipt *p = PRIV(a, ipt); 208 - struct sk_buff *skb = *pskb; 209 208 210 209 if (skb_cloned(skb)) { 211 210 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) ··· 221 222 worry later - danger - this API seems to have changed 222 223 from earlier kernels */ 223 224 225 + /* iptables targets take a double skb pointer in case the skb 226 + * needs to be replaced. We don't own the skb, so this must not 227 + * happen. The pskb_expand_head above should make sure of this */ 224 228 ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, 225 229 p->hook, p->t->data, NULL); 226 230 switch (ret) {
+1 -2
net/sched/mirred.c net/sched/act_mirred.c
··· 158 158 } 159 159 160 160 static int 161 - tcf_mirred(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 161 + tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 162 162 { 163 163 struct tcf_mirred *p = PRIV(a, mirred); 164 164 struct net_device *dev; 165 165 struct sk_buff *skb2 = NULL; 166 - struct sk_buff *skb = *pskb; 167 166 u32 at = G_TC_AT(skb->tc_verd); 168 167 169 168 spin_lock(&p->lock);
+3 -2
net/sched/pedit.c net/sched/act_pedit.c
··· 130 130 } 131 131 132 132 static int 133 - tcf_pedit(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 133 + tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 134 134 { 135 135 struct tcf_pedit *p = PRIV(a, pedit); 136 - struct sk_buff *skb = *pskb; 137 136 int i, munged = 0; 138 137 u8 *pptr; 139 138 ··· 245 246 t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 246 247 t.expires = jiffies_to_clock_t(p->tm.expires); 247 248 RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); 249 + kfree(opt); 248 250 return skb->len; 249 251 250 252 rtattr_failure: 251 253 skb_trim(skb, b - skb->data); 254 + kfree(opt); 252 255 return -1; 253 256 } 254 257
+4 -13
net/sched/police.c net/sched/act_police.c
··· 284 284 return 0; 285 285 } 286 286 287 - static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a, 287 + static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, 288 288 struct tcf_result *res) 289 289 { 290 290 psched_time_t now; 291 - struct sk_buff *skb = *pskb; 292 291 struct tcf_police *p = PRIV(a); 293 292 long toks; 294 293 long ptoks = 0; ··· 407 408 module_init(police_init_module); 408 409 module_exit(police_cleanup_module); 409 410 410 - #endif 411 + #else /* CONFIG_NET_CLS_ACT */ 411 412 412 413 struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) 413 414 { ··· 544 545 spin_unlock(&p->lock); 545 546 return p->action; 546 547 } 548 + EXPORT_SYMBOL(tcf_police); 547 549 548 550 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) 549 551 { ··· 601 601 return -1; 602 602 } 603 603 604 - 605 - EXPORT_SYMBOL(tcf_police); 606 - EXPORT_SYMBOL(tcf_police_destroy); 607 - EXPORT_SYMBOL(tcf_police_dump); 608 - EXPORT_SYMBOL(tcf_police_dump_stats); 609 - EXPORT_SYMBOL(tcf_police_hash); 610 - EXPORT_SYMBOL(tcf_police_ht); 611 - EXPORT_SYMBOL(tcf_police_locate); 612 - EXPORT_SYMBOL(tcf_police_lookup); 613 - EXPORT_SYMBOL(tcf_police_new_index); 604 + #endif /* CONFIG_NET_CLS_ACT */
+2 -2
net/sched/sch_cbq.c
··· 257 257 (cl = cbq_class_lookup(q, prio)) != NULL) 258 258 return cl; 259 259 260 - *qerr = NET_XMIT_DROP; 260 + *qerr = NET_XMIT_BYPASS; 261 261 for (;;) { 262 262 int result = 0; 263 263 defmap = head->defaults; ··· 413 413 q->rx_class = cl; 414 414 #endif 415 415 if (cl == NULL) { 416 - if (ret == NET_XMIT_DROP) 416 + if (ret == NET_XMIT_BYPASS) 417 417 sch->qstats.drops++; 418 418 kfree_skb(skb); 419 419 return ret;
+6 -6
net/sched/sch_hfsc.c
··· 208 208 do { \ 209 209 struct timeval tv; \ 210 210 do_gettimeofday(&tv); \ 211 - (stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec; \ 211 + (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \ 212 212 } while (0) 213 213 #endif 214 214 ··· 502 502 u64 dx; 503 503 504 504 dx = ((u64)d * PSCHED_JIFFIE2US(HZ)); 505 - dx += 1000000 - 1; 506 - do_div(dx, 1000000); 505 + dx += USEC_PER_SEC - 1; 506 + do_div(dx, USEC_PER_SEC); 507 507 return dx; 508 508 } 509 509 ··· 523 523 { 524 524 u64 d; 525 525 526 - d = dx * 1000000; 526 + d = dx * USEC_PER_SEC; 527 527 do_div(d, PSCHED_JIFFIE2US(HZ)); 528 528 return (u32)d; 529 529 } ··· 1227 1227 if (cl->level == 0) 1228 1228 return cl; 1229 1229 1230 - *qerr = NET_XMIT_DROP; 1230 + *qerr = NET_XMIT_BYPASS; 1231 1231 tcf = q->root.filter_list; 1232 1232 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { 1233 1233 #ifdef CONFIG_NET_CLS_ACT ··· 1643 1643 1644 1644 cl = hfsc_classify(skb, sch, &err); 1645 1645 if (cl == NULL) { 1646 - if (err == NET_XMIT_DROP) 1646 + if (err == NET_XMIT_BYPASS) 1647 1647 sch->qstats.drops++; 1648 1648 kfree_skb(skb); 1649 1649 return err;
+2 -2
net/sched/sch_htb.c
··· 321 321 if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) 322 322 return cl; 323 323 324 - *qerr = NET_XMIT_DROP; 324 + *qerr = NET_XMIT_BYPASS; 325 325 tcf = q->filter_list; 326 326 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { 327 327 #ifdef CONFIG_NET_CLS_ACT ··· 724 724 } 725 725 #ifdef CONFIG_NET_CLS_ACT 726 726 } else if (!cl) { 727 - if (ret == NET_XMIT_DROP) 727 + if (ret == NET_XMIT_BYPASS) 728 728 sch->qstats.drops++; 729 729 kfree_skb (skb); 730 730 return ret;
+4 -3
net/sched/sch_prio.c
··· 54 54 u32 band = skb->priority; 55 55 struct tcf_result res; 56 56 57 - *qerr = NET_XMIT_DROP; 57 + *qerr = NET_XMIT_BYPASS; 58 58 if (TC_H_MAJ(skb->priority) != sch->handle) { 59 59 #ifdef CONFIG_NET_CLS_ACT 60 60 switch (tc_classify(skb, q->filter_list, &res)) { ··· 91 91 qdisc = prio_classify(skb, sch, &ret); 92 92 #ifdef CONFIG_NET_CLS_ACT 93 93 if (qdisc == NULL) { 94 - if (ret == NET_XMIT_DROP) 94 + 95 + if (ret == NET_XMIT_BYPASS) 95 96 sch->qstats.drops++; 96 97 kfree_skb(skb); 97 98 return ret; ··· 119 118 qdisc = prio_classify(skb, sch, &ret); 120 119 #ifdef CONFIG_NET_CLS_ACT 121 120 if (qdisc == NULL) { 122 - if (ret == NET_XMIT_DROP) 121 + if (ret == NET_XMIT_BYPASS) 123 122 sch->qstats.drops++; 124 123 kfree_skb(skb); 125 124 return ret;
+6 -6
net/sched/sch_teql.c
··· 274 274 275 275 static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 276 276 { 277 - struct teql_master *master = (void*)dev->priv; 277 + struct teql_master *master = netdev_priv(dev); 278 278 struct Qdisc *start, *q; 279 279 int busy; 280 280 int nores; ··· 350 350 static int teql_master_open(struct net_device *dev) 351 351 { 352 352 struct Qdisc * q; 353 - struct teql_master *m = (void*)dev->priv; 353 + struct teql_master *m = netdev_priv(dev); 354 354 int mtu = 0xFFFE; 355 355 unsigned flags = IFF_NOARP|IFF_MULTICAST; 356 356 ··· 397 397 398 398 static struct net_device_stats *teql_master_stats(struct net_device *dev) 399 399 { 400 - struct teql_master *m = (void*)dev->priv; 400 + struct teql_master *m = netdev_priv(dev); 401 401 return &m->stats; 402 402 } 403 403 404 404 static int teql_master_mtu(struct net_device *dev, int new_mtu) 405 405 { 406 - struct teql_master *m = (void*)dev->priv; 406 + struct teql_master *m = netdev_priv(dev); 407 407 struct Qdisc *q; 408 408 409 409 if (new_mtu < 68) ··· 423 423 424 424 static __init void teql_master_setup(struct net_device *dev) 425 425 { 426 - struct teql_master *master = dev->priv; 426 + struct teql_master *master = netdev_priv(dev); 427 427 struct Qdisc_ops *ops = &master->qops; 428 428 429 429 master->dev = dev; ··· 476 476 break; 477 477 } 478 478 479 - master = dev->priv; 479 + master = netdev_priv(dev); 480 480 481 481 strlcpy(master->qops.id, dev->name, IFNAMSIZ); 482 482 err = register_qdisc(&master->qops);
+1 -2
net/sched/simple.c net/sched/act_simple.c
··· 44 44 #include <net/pkt_act.h> 45 45 #include <net/act_generic.h> 46 46 47 - static int tcf_simp(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 47 + static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 48 48 { 49 - struct sk_buff *skb = *pskb; 50 49 struct tcf_defact *p = PRIV(a, defact); 51 50 52 51 spin_lock(&p->lock);
+1 -2
net/sctp/sm_sideeffect.c
··· 1250 1250 case SCTP_CMD_TIMER_START: 1251 1251 timer = &asoc->timers[cmd->obj.to]; 1252 1252 timeout = asoc->timeouts[cmd->obj.to]; 1253 - if (!timeout) 1254 - BUG(); 1253 + BUG_ON(!timeout); 1255 1254 1256 1255 timer->expires = jiffies + timeout; 1257 1256 sctp_association_hold(asoc);
+2 -3
net/sunrpc/cache.c
··· 575 575 if (rp->q.list.next == &cd->queue) { 576 576 spin_unlock(&queue_lock); 577 577 up(&queue_io_sem); 578 - if (rp->offset) 579 - BUG(); 578 + BUG_ON(rp->offset); 580 579 return 0; 581 580 } 582 581 rq = container_of(rp->q.list.next, struct cache_request, q.list); 583 - if (rq->q.reader) BUG(); 582 + BUG_ON(rq->q.reader); 584 583 if (rp->offset == 0) 585 584 rq->readers++; 586 585 spin_unlock(&queue_lock);
+1 -2
net/sunrpc/svc.c
··· 122 122 rqstp->rq_argused = 0; 123 123 rqstp->rq_resused = 0; 124 124 arghi = 0; 125 - if (pages > RPCSVC_MAXPAGES) 126 - BUG(); 125 + BUG_ON(pages > RPCSVC_MAXPAGES); 127 126 while (pages) { 128 127 struct page *p = alloc_page(GFP_KERNEL); 129 128 if (!p)
+2 -4
net/xfrm/xfrm_algo.c
··· 540 540 start = end; 541 541 } 542 542 } 543 - if (len) 544 - BUG(); 543 + BUG_ON(len); 545 544 } 546 545 EXPORT_SYMBOL_GPL(skb_icv_walk); 547 546 ··· 609 610 start = end; 610 611 } 611 612 } 612 - if (len) 613 - BUG(); 613 + BUG_ON(len); 614 614 return elt; 615 615 } 616 616 EXPORT_SYMBOL_GPL(skb_to_sgvec);
+2 -4
net/xfrm/xfrm_policy.c
··· 248 248 249 249 void __xfrm_policy_destroy(struct xfrm_policy *policy) 250 250 { 251 - if (!policy->dead) 252 - BUG(); 251 + BUG_ON(!policy->dead); 253 252 254 - if (policy->bundles) 255 - BUG(); 253 + BUG_ON(policy->bundles); 256 254 257 255 if (del_timer(&policy->timer)) 258 256 BUG();