Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

+1020 -745
+18 -20
arch/i386/crypto/aes-i586-asm.S
··· 255 xor 8(%ebp),%r4 256 xor 12(%ebp),%r5 257 258 - sub $8,%esp // space for register saves on stack 259 - add $16,%ebp // increment to next round key 260 - sub $10,%r3 261 - je 4f // 10 rounds for 128-bit key 262 - add $32,%ebp 263 - sub $2,%r3 264 - je 3f // 12 rounds for 128-bit key 265 - add $32,%ebp 266 267 - 2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 128-bit key 268 fwd_rnd2( -48(%ebp) ,ft_tab) 269 - 3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 128-bit key 270 fwd_rnd2( -16(%ebp) ,ft_tab) 271 4: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key 272 fwd_rnd2( +16(%ebp) ,ft_tab) ··· 333 xor 8(%ebp),%r4 334 xor 12(%ebp),%r5 335 336 - sub $8,%esp // space for register saves on stack 337 - sub $16,%ebp // increment to next round key 338 - sub $10,%r3 339 - je 4f // 10 rounds for 128-bit key 340 - sub $32,%ebp 341 - sub $2,%r3 342 - je 3f // 12 rounds for 128-bit key 343 - sub $32,%ebp 344 345 - 2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 128-bit key 346 inv_rnd2( +48(%ebp), it_tab) 347 - 3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 128-bit key 348 inv_rnd2( +16(%ebp), it_tab) 349 4: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key 350 inv_rnd2( -16(%ebp), it_tab)
··· 255 xor 8(%ebp),%r4 256 xor 12(%ebp),%r5 257 258 + sub $8,%esp // space for register saves on stack 259 + add $16,%ebp // increment to next round key 260 + cmp $12,%r3 261 + jb 4f // 10 rounds for 128-bit key 262 + lea 32(%ebp),%ebp 263 + je 3f // 12 rounds for 192-bit key 264 + lea 32(%ebp),%ebp 265 266 + 2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 256-bit key 267 fwd_rnd2( -48(%ebp) ,ft_tab) 268 + 3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 192-bit key 269 fwd_rnd2( -16(%ebp) ,ft_tab) 270 4: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key 271 fwd_rnd2( +16(%ebp) ,ft_tab) ··· 334 xor 8(%ebp),%r4 335 xor 12(%ebp),%r5 336 337 + sub $8,%esp // space for register saves on stack 338 + sub $16,%ebp // increment to next round key 339 + cmp $12,%r3 340 + jb 4f // 10 rounds for 128-bit key 341 + lea -32(%ebp),%ebp 342 + je 3f // 12 rounds for 192-bit key 343 + lea -32(%ebp),%ebp 344 345 + 2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 256-bit key 346 inv_rnd2( +48(%ebp), it_tab) 347 + 3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 192-bit key 348 inv_rnd2( +16(%ebp), it_tab) 349 4: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key 350 inv_rnd2( -16(%ebp), it_tab)
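The new dispatch above replaces the old destructive sub/je ladder with a single cmp against 12 and steps the round-key pointer with lea, which (unlike add or sub) leaves the flags from the cmp intact for the second branch; the comments are also corrected to say 192- and 256-bit where the old text repeated "128-bit". A rough C rendering of where the key pointer ends up, purely illustrative (function and variable names are hypothetical, not part of the patch):

    /* Sketch of the branch ladder above: AES-128 runs 10 rounds,
     * AES-192 runs 12 and AES-256 runs 14, and each extra pair of
     * rounds starts 32 bytes (two round keys) further into the
     * schedule, hence the lea 32(%ebp),%ebp steps. */
    static const u32 *first_round_key(const u32 *rk, int rounds)
    {
            rk += 4;                /* add $16,%ebp: past the first round key */
            if (rounds < 12)        /* jb 4f: 128-bit key */
                    return rk;
            rk += 8;                /* lea 32(%ebp),%ebp */
            if (rounds == 12)       /* je 3f: 192-bit key */
                    return rk;
            return rk + 8;          /* 256-bit key, 14 rounds */
    }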
+25 -31
arch/i386/crypto/aes.c
··· 36 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> 37 * 38 */ 39 #include <linux/kernel.h> 40 #include <linux/module.h> 41 #include <linux/init.h> ··· 61 }; 62 63 #define WPOLY 0x011b 64 - #define u32_in(x) le32_to_cpup((const __le32 *)(x)) 65 #define bytes2word(b0, b1, b2, b3) \ 66 (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) 67 ··· 94 95 u32 ft_tab[4][256]; 96 u32 fl_tab[4][256]; 97 - static u32 ls_tab[4][256]; 98 static u32 im_tab[4][256]; 99 u32 il_tab[4][256]; 100 u32 it_tab[4][256]; ··· 143 fl_tab[1][i] = upr(w, 1); 144 fl_tab[2][i] = upr(w, 2); 145 fl_tab[3][i] = upr(w, 3); 146 - 147 - /* 148 - * table for key schedule if fl_tab above is 149 - * not of the required form 150 - */ 151 - ls_tab[0][i] = w; 152 - ls_tab[1][i] = upr(w, 1); 153 - ls_tab[2][i] = upr(w, 2); 154 - ls_tab[3][i] = upr(w, 3); 155 156 b = fi(inv_affine((u8)i)); 157 w = bytes2word(fe(b), f9(b), fd(b), fb(b)); ··· 384 int i; 385 u32 ss[8]; 386 struct aes_ctx *ctx = ctx_arg; 387 388 /* encryption schedule */ 389 390 - ctx->ekey[0] = ss[0] = u32_in(in_key); 391 - ctx->ekey[1] = ss[1] = u32_in(in_key + 4); 392 - ctx->ekey[2] = ss[2] = u32_in(in_key + 8); 393 - ctx->ekey[3] = ss[3] = u32_in(in_key + 12); 394 395 switch(key_len) { 396 case 16: ··· 402 break; 403 404 case 24: 405 - ctx->ekey[4] = ss[4] = u32_in(in_key + 16); 406 - ctx->ekey[5] = ss[5] = u32_in(in_key + 20); 407 for (i = 0; i < 7; i++) 408 ke6(ctx->ekey, i); 409 kel6(ctx->ekey, 7); ··· 411 break; 412 413 case 32: 414 - ctx->ekey[4] = ss[4] = u32_in(in_key + 16); 415 - ctx->ekey[5] = ss[5] = u32_in(in_key + 20); 416 - ctx->ekey[6] = ss[6] = u32_in(in_key + 24); 417 - ctx->ekey[7] = ss[7] = u32_in(in_key + 28); 418 for (i = 0; i < 6; i++) 419 ke8(ctx->ekey, i); 420 kel8(ctx->ekey, 6); ··· 428 429 /* decryption schedule */ 430 431 - ctx->dkey[0] = ss[0] = u32_in(in_key); 432 - ctx->dkey[1] = ss[1] = u32_in(in_key + 4); 433 - ctx->dkey[2] = ss[2] = u32_in(in_key + 8); 434 - ctx->dkey[3] = ss[3] = u32_in(in_key + 12); 435 436 switch (key_len) { 437 case 16: ··· 442 break; 443 444 case 24: 445 - ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); 446 - ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); 447 kdf6(ctx->dkey, 0); 448 for (i = 1; i < 7; i++) 449 kd6(ctx->dkey, i); ··· 451 break; 452 453 case 32: 454 - ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); 455 - ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); 456 - ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24)); 457 - ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28)); 458 kdf8(ctx->dkey, 0); 459 for (i = 1; i < 6; i++) 460 kd8(ctx->dkey, i); ··· 476 477 static struct crypto_alg aes_alg = { 478 .cra_name = "aes", 479 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 480 .cra_blocksize = AES_BLOCK_SIZE, 481 .cra_ctxsize = sizeof(struct aes_ctx),
··· 36 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> 37 * 38 */ 39 + 40 + #include <asm/byteorder.h> 41 #include <linux/kernel.h> 42 #include <linux/module.h> 43 #include <linux/init.h> ··· 59 }; 60 61 #define WPOLY 0x011b 62 #define bytes2word(b0, b1, b2, b3) \ 63 (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) 64 ··· 93 94 u32 ft_tab[4][256]; 95 u32 fl_tab[4][256]; 96 static u32 im_tab[4][256]; 97 u32 il_tab[4][256]; 98 u32 it_tab[4][256]; ··· 143 fl_tab[1][i] = upr(w, 1); 144 fl_tab[2][i] = upr(w, 2); 145 fl_tab[3][i] = upr(w, 3); 146 147 b = fi(inv_affine((u8)i)); 148 w = bytes2word(fe(b), f9(b), fd(b), fb(b)); ··· 393 int i; 394 u32 ss[8]; 395 struct aes_ctx *ctx = ctx_arg; 396 + const __le32 *key = (const __le32 *)in_key; 397 398 /* encryption schedule */ 399 400 + ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]); 401 + ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]); 402 + ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]); 403 + ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]); 404 405 switch(key_len) { 406 case 16: ··· 410 break; 411 412 case 24: 413 + ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]); 414 + ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]); 415 for (i = 0; i < 7; i++) 416 ke6(ctx->ekey, i); 417 kel6(ctx->ekey, 7); ··· 419 break; 420 421 case 32: 422 + ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]); 423 + ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]); 424 + ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]); 425 + ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]); 426 for (i = 0; i < 6; i++) 427 ke8(ctx->ekey, i); 428 kel8(ctx->ekey, 6); ··· 436 437 /* decryption schedule */ 438 439 + ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]); 440 + ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]); 441 + ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]); 442 + ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]); 443 444 switch (key_len) { 445 case 16: ··· 450 break; 451 452 case 24: 453 + ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4])); 454 + ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5])); 455 kdf6(ctx->dkey, 0); 456 for (i = 1; i < 7; i++) 457 kd6(ctx->dkey, i); ··· 459 break; 460 461 case 32: 462 + ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4])); 463 + ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5])); 464 + ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6])); 465 + ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7])); 466 kdf8(ctx->dkey, 0); 467 for (i = 1; i < 6; i++) 468 kd8(ctx->dkey, i); ··· 484 485 static struct crypto_alg aes_alg = { 486 .cra_name = "aes", 487 + .cra_driver_name = "aes-i586", 488 + .cra_priority = 200, 489 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 490 .cra_blocksize = AES_BLOCK_SIZE, 491 .cra_ctxsize = sizeof(struct aes_ctx),
+13 -12
arch/x86_64/crypto/aes.c
··· 74 return x >> (n << 3); 75 } 76 77 - #define u32_in(x) le32_to_cpu(*(const __le32 *)(x)) 78 - 79 struct aes_ctx 80 { 81 u32 key_length; ··· 232 u32 *flags) 233 { 234 struct aes_ctx *ctx = ctx_arg; 235 u32 i, j, t, u, v, w; 236 237 if (key_len != 16 && key_len != 24 && key_len != 32) { ··· 242 243 ctx->key_length = key_len; 244 245 - D_KEY[key_len + 24] = E_KEY[0] = u32_in(in_key); 246 - D_KEY[key_len + 25] = E_KEY[1] = u32_in(in_key + 4); 247 - D_KEY[key_len + 26] = E_KEY[2] = u32_in(in_key + 8); 248 - D_KEY[key_len + 27] = E_KEY[3] = u32_in(in_key + 12); 249 250 switch (key_len) { 251 case 16: ··· 255 break; 256 257 case 24: 258 - E_KEY[4] = u32_in(in_key + 16); 259 - t = E_KEY[5] = u32_in(in_key + 20); 260 for (i = 0; i < 8; ++i) 261 loop6 (i); 262 break; 263 264 case 32: 265 - E_KEY[4] = u32_in(in_key + 16); 266 - E_KEY[5] = u32_in(in_key + 20); 267 - E_KEY[6] = u32_in(in_key + 24); 268 - t = E_KEY[7] = u32_in(in_key + 28); 269 for (i = 0; i < 7; ++i) 270 loop8(i); 271 break; ··· 289 290 static struct crypto_alg aes_alg = { 291 .cra_name = "aes", 292 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 293 .cra_blocksize = AES_BLOCK_SIZE, 294 .cra_ctxsize = sizeof(struct aes_ctx),
··· 74 return x >> (n << 3); 75 } 76 77 struct aes_ctx 78 { 79 u32 key_length; ··· 234 u32 *flags) 235 { 236 struct aes_ctx *ctx = ctx_arg; 237 + const __le32 *key = (const __le32 *)in_key; 238 u32 i, j, t, u, v, w; 239 240 if (key_len != 16 && key_len != 24 && key_len != 32) { ··· 243 244 ctx->key_length = key_len; 245 246 + D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]); 247 + D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]); 248 + D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]); 249 + D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]); 250 251 switch (key_len) { 252 case 16: ··· 256 break; 257 258 case 24: 259 + E_KEY[4] = le32_to_cpu(key[4]); 260 + t = E_KEY[5] = le32_to_cpu(key[5]); 261 for (i = 0; i < 8; ++i) 262 loop6 (i); 263 break; 264 265 case 32: 266 + E_KEY[4] = le32_to_cpu(key[4]); 267 + E_KEY[5] = le32_to_cpu(key[5]); 268 + E_KEY[6] = le32_to_cpu(key[6]); 269 + t = E_KEY[7] = le32_to_cpu(key[7]); 270 for (i = 0; i < 7; ++i) 271 loop8(i); 272 break; ··· 290 291 static struct crypto_alg aes_alg = { 292 .cra_name = "aes", 293 + .cra_driver_name = "aes-x86_64", 294 + .cra_priority = 200, 295 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 296 .cra_blocksize = AES_BLOCK_SIZE, 297 .cra_ctxsize = sizeof(struct aes_ctx),
+1 -1
crypto/Kconfig
··· 157 158 config CRYPTO_AES 159 tristate "AES cipher algorithms" 160 - depends on CRYPTO && !(X86 || UML_X86) 161 help 162 AES cipher algorithms (FIPS-197). AES uses the Rijndael 163 algorithm.
··· 157 158 config CRYPTO_AES 159 tristate "AES cipher algorithms" 160 + depends on CRYPTO 161 help 162 AES cipher algorithms (FIPS-197). AES uses the Rijndael 163 algorithm.
+34 -29
crypto/aes.c
··· 73 return x >> (n << 3); 74 } 75 76 - #define u32_in(x) le32_to_cpu(*(const u32 *)(x)) 77 - #define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from)) 78 - 79 struct aes_ctx { 80 int key_length; 81 u32 E[60]; ··· 253 aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) 254 { 255 struct aes_ctx *ctx = ctx_arg; 256 u32 i, t, u, v, w; 257 258 if (key_len != 16 && key_len != 24 && key_len != 32) { ··· 263 264 ctx->key_length = key_len; 265 266 - E_KEY[0] = u32_in (in_key); 267 - E_KEY[1] = u32_in (in_key + 4); 268 - E_KEY[2] = u32_in (in_key + 8); 269 - E_KEY[3] = u32_in (in_key + 12); 270 271 switch (key_len) { 272 case 16: ··· 276 break; 277 278 case 24: 279 - E_KEY[4] = u32_in (in_key + 16); 280 - t = E_KEY[5] = u32_in (in_key + 20); 281 for (i = 0; i < 8; ++i) 282 loop6 (i); 283 break; 284 285 case 32: 286 - E_KEY[4] = u32_in (in_key + 16); 287 - E_KEY[5] = u32_in (in_key + 20); 288 - E_KEY[6] = u32_in (in_key + 24); 289 - t = E_KEY[7] = u32_in (in_key + 28); 290 for (i = 0; i < 7; ++i) 291 loop8 (i); 292 break; ··· 322 static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) 323 { 324 const struct aes_ctx *ctx = ctx_arg; 325 u32 b0[4], b1[4]; 326 const u32 *kp = E_KEY + 4; 327 328 - b0[0] = u32_in (in) ^ E_KEY[0]; 329 - b0[1] = u32_in (in + 4) ^ E_KEY[1]; 330 - b0[2] = u32_in (in + 8) ^ E_KEY[2]; 331 - b0[3] = u32_in (in + 12) ^ E_KEY[3]; 332 333 if (ctx->key_length > 24) { 334 f_nround (b1, b0, kp); ··· 353 f_nround (b1, b0, kp); 354 f_lround (b0, b1, kp); 355 356 - u32_out (out, b0[0]); 357 - u32_out (out + 4, b0[1]); 358 - u32_out (out + 8, b0[2]); 359 - u32_out (out + 12, b0[3]); 360 } 361 362 /* decrypt a block of text */ ··· 377 static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) 378 { 379 const struct aes_ctx *ctx = ctx_arg; 380 u32 b0[4], b1[4]; 381 const int key_len = ctx->key_length; 382 const u32 *kp = D_KEY + key_len + 20; 383 384 - b0[0] = u32_in (in) ^ E_KEY[key_len + 24]; 385 - b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25]; 386 - b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26]; 387 - b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27]; 388 389 if (key_len > 24) { 390 i_nround (b1, b0, kp); ··· 409 i_nround (b1, b0, kp); 410 i_lround (b0, b1, kp); 411 412 - u32_out (out, b0[0]); 413 - u32_out (out + 4, b0[1]); 414 - u32_out (out + 8, b0[2]); 415 - u32_out (out + 12, b0[3]); 416 } 417 418 419 static struct crypto_alg aes_alg = { 420 .cra_name = "aes", 421 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 422 .cra_blocksize = AES_BLOCK_SIZE, 423 .cra_ctxsize = sizeof(struct aes_ctx), 424 .cra_module = THIS_MODULE, 425 .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), 426 .cra_u = {
··· 73 return x >> (n << 3); 74 } 75 76 struct aes_ctx { 77 int key_length; 78 u32 E[60]; ··· 256 aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) 257 { 258 struct aes_ctx *ctx = ctx_arg; 259 + const __le32 *key = (const __le32 *)in_key; 260 u32 i, t, u, v, w; 261 262 if (key_len != 16 && key_len != 24 && key_len != 32) { ··· 265 266 ctx->key_length = key_len; 267 268 + E_KEY[0] = le32_to_cpu(key[0]); 269 + E_KEY[1] = le32_to_cpu(key[1]); 270 + E_KEY[2] = le32_to_cpu(key[2]); 271 + E_KEY[3] = le32_to_cpu(key[3]); 272 273 switch (key_len) { 274 case 16: ··· 278 break; 279 280 case 24: 281 + E_KEY[4] = le32_to_cpu(key[4]); 282 + t = E_KEY[5] = le32_to_cpu(key[5]); 283 for (i = 0; i < 8; ++i) 284 loop6 (i); 285 break; 286 287 case 32: 288 + E_KEY[4] = le32_to_cpu(key[4]); 289 + E_KEY[5] = le32_to_cpu(key[5]); 290 + E_KEY[6] = le32_to_cpu(key[6]); 291 + t = E_KEY[7] = le32_to_cpu(key[7]); 292 for (i = 0; i < 7; ++i) 293 loop8 (i); 294 break; ··· 324 static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) 325 { 326 const struct aes_ctx *ctx = ctx_arg; 327 + const __le32 *src = (const __le32 *)in; 328 + __le32 *dst = (__le32 *)out; 329 u32 b0[4], b1[4]; 330 const u32 *kp = E_KEY + 4; 331 332 + b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0]; 333 + b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1]; 334 + b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2]; 335 + b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3]; 336 337 if (ctx->key_length > 24) { 338 f_nround (b1, b0, kp); ··· 353 f_nround (b1, b0, kp); 354 f_lround (b0, b1, kp); 355 356 + dst[0] = cpu_to_le32(b0[0]); 357 + dst[1] = cpu_to_le32(b0[1]); 358 + dst[2] = cpu_to_le32(b0[2]); 359 + dst[3] = cpu_to_le32(b0[3]); 360 } 361 362 /* decrypt a block of text */ ··· 377 static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) 378 { 379 const struct aes_ctx *ctx = ctx_arg; 380 + const __le32 *src = (const __le32 *)in; 381 + __le32 *dst = (__le32 *)out; 382 u32 b0[4], b1[4]; 383 const int key_len = ctx->key_length; 384 const u32 *kp = D_KEY + key_len + 20; 385 386 + b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24]; 387 + b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25]; 388 + b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26]; 389 + b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27]; 390 391 if (key_len > 24) { 392 i_nround (b1, b0, kp); ··· 407 i_nround (b1, b0, kp); 408 i_lround (b0, b1, kp); 409 410 + dst[0] = cpu_to_le32(b0[0]); 411 + dst[1] = cpu_to_le32(b0[1]); 412 + dst[2] = cpu_to_le32(b0[2]); 413 + dst[3] = cpu_to_le32(b0[3]); 414 } 415 416 417 static struct crypto_alg aes_alg = { 418 .cra_name = "aes", 419 + .cra_driver_name = "aes-generic", 420 + .cra_priority = 100, 421 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 422 .cra_blocksize = AES_BLOCK_SIZE, 423 .cra_ctxsize = sizeof(struct aes_ctx), 424 + .cra_alignmask = 3, 425 .cra_module = THIS_MODULE, 426 .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), 427 .cra_u = {
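In the generic crypto/aes.c the open-coded u32_in()/u32_out() macros give way to typed __le32 loads and stores, and the new .cra_alignmask = 3 declares that this cipher wants its buffers 4-byte aligned so those pointer casts are safe. Byte for byte the conversion is unchanged; as a purely illustrative sketch (the helper name is made up), le32_to_cpu(*(const __le32 *)p) reads the same value the old macro did:

    /* Illustrative equivalent of the typed load above: a little-endian
     * 32-bit read done with byte loads, so it is alignment- and
     * endian-independent.  Not part of the patch. */
    static u32 load_le32(const u8 *p)
    {
            return ((u32)p[3] << 24) | ((u32)p[2] << 16) |
                   ((u32)p[1] << 8)  |  (u32)p[0];
    }

On little-endian machines le32_to_cpu() is a no-op; on big-endian ones it byte-swaps, so both forms see the key and data as the same little-endian byte stream.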
+14 -25
crypto/anubis.c
··· 32 #include <linux/init.h> 33 #include <linux/module.h> 34 #include <linux/mm.h> 35 #include <asm/scatterlist.h> 36 #include <linux/crypto.h> 37 38 #define ANUBIS_MIN_KEY_SIZE 16 39 #define ANUBIS_MAX_KEY_SIZE 40 ··· 463 static int anubis_setkey(void *ctx_arg, const u8 *in_key, 464 unsigned int key_len, u32 *flags) 465 { 466 - 467 - int N, R, i, pos, r; 468 u32 kappa[ANUBIS_MAX_N]; 469 u32 inter[ANUBIS_MAX_N]; 470 ··· 485 ctx->R = R = 8 + N; 486 487 /* * map cipher key to initial key state (mu): */ 488 - for (i = 0, pos = 0; i < N; i++, pos += 4) { 489 - kappa[i] = 490 - (in_key[pos ] << 24) ^ 491 - (in_key[pos + 1] << 16) ^ 492 - (in_key[pos + 2] << 8) ^ 493 - (in_key[pos + 3] ); 494 - } 495 496 /* 497 * generate R + 1 round keys: ··· 575 static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], 576 u8 *ciphertext, const u8 *plaintext, const int R) 577 { 578 - int i, pos, r; 579 u32 state[4]; 580 u32 inter[4]; 581 ··· 585 * map plaintext block to cipher state (mu) 586 * and add initial round key (sigma[K^0]): 587 */ 588 - for (i = 0, pos = 0; i < 4; i++, pos += 4) { 589 - state[i] = 590 - (plaintext[pos ] << 24) ^ 591 - (plaintext[pos + 1] << 16) ^ 592 - (plaintext[pos + 2] << 8) ^ 593 - (plaintext[pos + 3] ) ^ 594 - roundKey[0][i]; 595 - } 596 597 /* 598 * R - 1 full rounds: ··· 656 * map cipher state to ciphertext block (mu^{-1}): 657 */ 658 659 - for (i = 0, pos = 0; i < 4; i++, pos += 4) { 660 - u32 w = inter[i]; 661 - ciphertext[pos ] = (u8)(w >> 24); 662 - ciphertext[pos + 1] = (u8)(w >> 16); 663 - ciphertext[pos + 2] = (u8)(w >> 8); 664 - ciphertext[pos + 3] = (u8)(w ); 665 - } 666 } 667 668 static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) ··· 677 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 678 .cra_blocksize = ANUBIS_BLOCK_SIZE, 679 .cra_ctxsize = sizeof (struct anubis_ctx), 680 .cra_module = THIS_MODULE, 681 .cra_list = LIST_HEAD_INIT(anubis_alg.cra_list), 682 .cra_u = { .cipher = {
··· 32 #include <linux/init.h> 33 #include <linux/module.h> 34 #include <linux/mm.h> 35 + #include <asm/byteorder.h> 36 #include <asm/scatterlist.h> 37 #include <linux/crypto.h> 38 + #include <linux/types.h> 39 40 #define ANUBIS_MIN_KEY_SIZE 16 41 #define ANUBIS_MAX_KEY_SIZE 40 ··· 461 static int anubis_setkey(void *ctx_arg, const u8 *in_key, 462 unsigned int key_len, u32 *flags) 463 { 464 + const __be32 *key = (const __be32 *)in_key; 465 + int N, R, i, r; 466 u32 kappa[ANUBIS_MAX_N]; 467 u32 inter[ANUBIS_MAX_N]; 468 ··· 483 ctx->R = R = 8 + N; 484 485 /* * map cipher key to initial key state (mu): */ 486 + for (i = 0; i < N; i++) 487 + kappa[i] = be32_to_cpu(key[i]); 488 489 /* 490 * generate R + 1 round keys: ··· 578 static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], 579 u8 *ciphertext, const u8 *plaintext, const int R) 580 { 581 + const __be32 *src = (const __be32 *)plaintext; 582 + __be32 *dst = (__be32 *)ciphertext; 583 + int i, r; 584 u32 state[4]; 585 u32 inter[4]; 586 ··· 586 * map plaintext block to cipher state (mu) 587 * and add initial round key (sigma[K^0]): 588 */ 589 + for (i = 0; i < 4; i++) 590 + state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; 591 592 /* 593 * R - 1 full rounds: ··· 663 * map cipher state to ciphertext block (mu^{-1}): 664 */ 665 666 + for (i = 0; i < 4; i++) 667 + dst[i] = cpu_to_be32(inter[i]); 668 } 669 670 static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) ··· 689 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 690 .cra_blocksize = ANUBIS_BLOCK_SIZE, 691 .cra_ctxsize = sizeof (struct anubis_ctx), 692 + .cra_alignmask = 3, 693 .cra_module = THIS_MODULE, 694 .cra_list = LIST_HEAD_INIT(anubis_alg.cra_list), 695 .cra_u = { .cipher = {
+47 -7
crypto/api.c
··· 3 * 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 * Copyright (c) 2002 David S. Miller (davem@redhat.com) 6 * 7 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> 8 * and Nettle, by Niels Möller. ··· 19 #include <linux/init.h> 20 #include <linux/crypto.h> 21 #include <linux/errno.h> 22 #include <linux/kmod.h> 23 #include <linux/rwsem.h> 24 #include <linux/slab.h> 25 #include "internal.h" 26 27 LIST_HEAD(crypto_alg_list); ··· 42 static struct crypto_alg *crypto_alg_lookup(const char *name) 43 { 44 struct crypto_alg *q, *alg = NULL; 45 46 if (!name) 47 return NULL; ··· 50 down_read(&crypto_alg_sem); 51 52 list_for_each_entry(q, &crypto_alg_list, cra_list) { 53 - if (!(strcmp(q->cra_name, name))) { 54 - if (crypto_alg_get(q)) 55 - alg = q; 56 break; 57 - } 58 } 59 60 up_read(&crypto_alg_sem); ··· 223 kfree(tfm); 224 } 225 226 int crypto_register_alg(struct crypto_alg *alg) 227 { 228 - int ret = 0; 229 struct crypto_alg *q; 230 231 if (alg->cra_alignmask & (alg->cra_alignmask + 1)) ··· 251 if (alg->cra_alignmask & alg->cra_blocksize) 252 return -EINVAL; 253 254 - if (alg->cra_blocksize > PAGE_SIZE) 255 return -EINVAL; 256 257 down_write(&crypto_alg_sem); 258 259 list_for_each_entry(q, &crypto_alg_list, cra_list) { 260 - if (!(strcmp(q->cra_name, alg->cra_name))) { 261 ret = -EEXIST; 262 goto out; 263 }
··· 3 * 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 * Copyright (c) 2002 David S. Miller (davem@redhat.com) 6 + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> 7 * 8 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> 9 * and Nettle, by Niels Möller. ··· 18 #include <linux/init.h> 19 #include <linux/crypto.h> 20 #include <linux/errno.h> 21 + #include <linux/kernel.h> 22 #include <linux/kmod.h> 23 #include <linux/rwsem.h> 24 #include <linux/slab.h> 25 + #include <linux/string.h> 26 #include "internal.h" 27 28 LIST_HEAD(crypto_alg_list); ··· 39 static struct crypto_alg *crypto_alg_lookup(const char *name) 40 { 41 struct crypto_alg *q, *alg = NULL; 42 + int best = -1; 43 44 if (!name) 45 return NULL; ··· 46 down_read(&crypto_alg_sem); 47 48 list_for_each_entry(q, &crypto_alg_list, cra_list) { 49 + int exact, fuzzy; 50 + 51 + exact = !strcmp(q->cra_driver_name, name); 52 + fuzzy = !strcmp(q->cra_name, name); 53 + if (!exact && !(fuzzy && q->cra_priority > best)) 54 + continue; 55 + 56 + if (unlikely(!crypto_alg_get(q))) 57 + continue; 58 + 59 + best = q->cra_priority; 60 + if (alg) 61 + crypto_alg_put(alg); 62 + alg = q; 63 + 64 + if (exact) 65 break; 66 } 67 68 up_read(&crypto_alg_sem); ··· 207 kfree(tfm); 208 } 209 210 + static inline int crypto_set_driver_name(struct crypto_alg *alg) 211 + { 212 + static const char suffix[] = "-generic"; 213 + char *driver_name = (char *)alg->cra_driver_name; 214 + int len; 215 + 216 + if (*driver_name) 217 + return 0; 218 + 219 + len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); 220 + if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) 221 + return -ENAMETOOLONG; 222 + 223 + memcpy(driver_name + len, suffix, sizeof(suffix)); 224 + return 0; 225 + } 226 + 227 int crypto_register_alg(struct crypto_alg *alg) 228 { 229 + int ret; 230 struct crypto_alg *q; 231 232 if (alg->cra_alignmask & (alg->cra_alignmask + 1)) ··· 218 if (alg->cra_alignmask & alg->cra_blocksize) 219 return -EINVAL; 220 221 + if (alg->cra_blocksize > PAGE_SIZE / 8) 222 + return -EINVAL; 223 + 224 + if (alg->cra_priority < 0) 225 return -EINVAL; 226 227 + ret = crypto_set_driver_name(alg); 228 + if (unlikely(ret)) 229 + return ret; 230 + 231 down_write(&crypto_alg_sem); 232 233 list_for_each_entry(q, &crypto_alg_list, cra_list) { 234 + if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) { 235 ret = -EEXIST; 236 goto out; 237 }
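With cra_driver_name and cra_priority in place, crypto_alg_lookup() now takes an exact driver-name match immediately and otherwise keeps the highest-priority implementation whose cra_name matches, while crypto_register_alg() fills in a "<name>-generic" driver name for algorithms that do not set one and rejects duplicate driver names. A hedged usage sketch of the resulting behaviour, assuming both aes-i586 (priority 200, from the hunk further up) and aes-generic (priority 100) are registered; this is not part of the patch:

    struct crypto_tfm *tfm;

    /* Fuzzy lookup by cra_name: the highest-priority provider wins,
     * so on i386 this should return the assembler implementation. */
    tfm = crypto_alloc_tfm("aes", 0);
    ...
    crypto_free_tfm(tfm);

    /* Exact lookup by cra_driver_name: forces the C implementation
     * even when a faster one is registered. */
    tfm = crypto_alloc_tfm("aes-generic", 0);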
+3
crypto/blowfish.c
··· 19 #include <linux/init.h> 20 #include <linux/module.h> 21 #include <linux/mm.h> 22 #include <asm/scatterlist.h> 23 #include <linux/crypto.h> 24 25 #define BF_BLOCK_SIZE 8 26 #define BF_MIN_KEY_SIZE 4 ··· 453 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 454 .cra_blocksize = BF_BLOCK_SIZE, 455 .cra_ctxsize = sizeof(struct bf_ctx), 456 .cra_module = THIS_MODULE, 457 .cra_list = LIST_HEAD_INIT(alg.cra_list), 458 .cra_u = { .cipher = {
··· 19 #include <linux/init.h> 20 #include <linux/module.h> 21 #include <linux/mm.h> 22 + #include <asm/byteorder.h> 23 #include <asm/scatterlist.h> 24 #include <linux/crypto.h> 25 + #include <linux/types.h> 26 27 #define BF_BLOCK_SIZE 8 28 #define BF_MIN_KEY_SIZE 4 ··· 451 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 452 .cra_blocksize = BF_BLOCK_SIZE, 453 .cra_ctxsize = sizeof(struct bf_ctx), 454 + .cra_alignmask = 3, 455 .cra_module = THIS_MODULE, 456 .cra_list = LIST_HEAD_INIT(alg.cra_list), 457 .cra_u = { .cipher = {
+20 -27
crypto/cast5.c
··· 21 */ 22 23 24 #include <linux/init.h> 25 #include <linux/crypto.h> 26 #include <linux/module.h> 27 #include <linux/errno.h> 28 #include <linux/string.h> 29 30 #define CAST5_BLOCK_SIZE 8 31 #define CAST5_MIN_KEY_SIZE 5 ··· 580 static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) 581 { 582 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 583 u32 l, r, t; 584 u32 I; /* used by the Fx macros */ 585 u32 *Km; ··· 593 /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and 594 * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) 595 */ 596 - l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; 597 - r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; 598 599 /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: 600 * Li = Ri-1; ··· 638 639 /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and 640 * concatenate to form the ciphertext.) */ 641 - outbuf[0] = (r >> 24) & 0xff; 642 - outbuf[1] = (r >> 16) & 0xff; 643 - outbuf[2] = (r >> 8) & 0xff; 644 - outbuf[3] = r & 0xff; 645 - outbuf[4] = (l >> 24) & 0xff; 646 - outbuf[5] = (l >> 16) & 0xff; 647 - outbuf[6] = (l >> 8) & 0xff; 648 - outbuf[7] = l & 0xff; 649 } 650 651 static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) 652 { 653 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 654 u32 l, r, t; 655 u32 I; 656 u32 *Km; ··· 655 Km = c->Km; 656 Kr = c->Kr; 657 658 - l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; 659 - r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; 660 661 if (!(c->rr)) { 662 t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); ··· 690 t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); 691 } 692 693 - outbuf[0] = (r >> 24) & 0xff; 694 - outbuf[1] = (r >> 16) & 0xff; 695 - outbuf[2] = (r >> 8) & 0xff; 696 - outbuf[3] = r & 0xff; 697 - outbuf[4] = (l >> 24) & 0xff; 698 - outbuf[5] = (l >> 16) & 0xff; 699 - outbuf[6] = (l >> 8) & 0xff; 700 - outbuf[7] = l & 0xff; 701 } 702 703 static void key_schedule(u32 * x, u32 * z, u32 * k) ··· 776 u32 x[4]; 777 u32 z[4]; 778 u32 k[16]; 779 - u8 p_key[16]; 780 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 781 782 if (key_len < 5 || key_len > 16) { ··· 790 memcpy(p_key, key, key_len); 791 792 793 - x[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; 794 - x[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; 795 - x[2] = 796 - p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; 797 - x[3] = 798 - p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; 799 800 key_schedule(x, z, k); 801 for (i = 0; i < 16; i++) ··· 809 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 810 .cra_blocksize = CAST5_BLOCK_SIZE, 811 .cra_ctxsize = sizeof(struct cast5_ctx), 812 .cra_module = THIS_MODULE, 813 .cra_list = LIST_HEAD_INIT(alg.cra_list), 814 .cra_u = {
··· 21 */ 22 23 24 + #include <asm/byteorder.h> 25 #include <linux/init.h> 26 #include <linux/crypto.h> 27 #include <linux/module.h> 28 #include <linux/errno.h> 29 #include <linux/string.h> 30 + #include <linux/types.h> 31 32 #define CAST5_BLOCK_SIZE 8 33 #define CAST5_MIN_KEY_SIZE 5 ··· 578 static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) 579 { 580 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 581 + const __be32 *src = (const __be32 *)inbuf; 582 + __be32 *dst = (__be32 *)outbuf; 583 u32 l, r, t; 584 u32 I; /* used by the Fx macros */ 585 u32 *Km; ··· 589 /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and 590 * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) 591 */ 592 + l = be32_to_cpu(src[0]); 593 + r = be32_to_cpu(src[1]); 594 595 /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: 596 * Li = Ri-1; ··· 634 635 /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and 636 * concatenate to form the ciphertext.) */ 637 + dst[0] = cpu_to_be32(r); 638 + dst[1] = cpu_to_be32(l); 639 } 640 641 static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) 642 { 643 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 644 + const __be32 *src = (const __be32 *)inbuf; 645 + __be32 *dst = (__be32 *)outbuf; 646 u32 l, r, t; 647 u32 I; 648 u32 *Km; ··· 655 Km = c->Km; 656 Kr = c->Kr; 657 658 + l = be32_to_cpu(src[0]); 659 + r = be32_to_cpu(src[1]); 660 661 if (!(c->rr)) { 662 t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); ··· 690 t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); 691 } 692 693 + dst[0] = cpu_to_be32(r); 694 + dst[1] = cpu_to_be32(l); 695 } 696 697 static void key_schedule(u32 * x, u32 * z, u32 * k) ··· 782 u32 x[4]; 783 u32 z[4]; 784 u32 k[16]; 785 + __be32 p_key[4]; 786 struct cast5_ctx *c = (struct cast5_ctx *) ctx; 787 788 if (key_len < 5 || key_len > 16) { ··· 796 memcpy(p_key, key, key_len); 797 798 799 + x[0] = be32_to_cpu(p_key[0]); 800 + x[1] = be32_to_cpu(p_key[1]); 801 + x[2] = be32_to_cpu(p_key[2]); 802 + x[3] = be32_to_cpu(p_key[3]); 803 804 key_schedule(x, z, k); 805 for (i = 0; i < 16; i++) ··· 817 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 818 .cra_blocksize = CAST5_BLOCK_SIZE, 819 .cra_ctxsize = sizeof(struct cast5_ctx), 820 + .cra_alignmask = 3, 821 .cra_module = THIS_MODULE, 822 .cra_list = LIST_HEAD_INIT(alg.cra_list), 823 .cra_u = {
+33 -50
crypto/cast6.c
··· 18 */ 19 20 21 #include <linux/init.h> 22 #include <linux/crypto.h> 23 #include <linux/module.h> 24 #include <linux/errno.h> 25 #include <linux/string.h> 26 27 #define CAST6_BLOCK_SIZE 16 28 #define CAST6_MIN_KEY_SIZE 16 ··· 386 { 387 int i; 388 u32 key[8]; 389 - u8 p_key[32]; /* padded key */ 390 struct cast6_ctx *c = (struct cast6_ctx *) ctx; 391 392 if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { ··· 397 memset (p_key, 0, 32); 398 memcpy (p_key, in_key, key_len); 399 400 - key[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; /* A */ 401 - key[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; /* B */ 402 - key[2] = p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; /* C */ 403 - key[3] = p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; /* D */ 404 - key[4] = p_key[16] << 24 | p_key[17] << 16 | p_key[18] << 8 | p_key[19]; /* E */ 405 - key[5] = p_key[20] << 24 | p_key[21] << 16 | p_key[22] << 8 | p_key[23]; /* F */ 406 - key[6] = p_key[24] << 24 | p_key[25] << 16 | p_key[26] << 8 | p_key[27]; /* G */ 407 - key[7] = p_key[28] << 24 | p_key[29] << 16 | p_key[30] << 8 | p_key[31]; /* H */ 408 409 410 ··· 446 447 static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { 448 struct cast6_ctx * c = (struct cast6_ctx *)ctx; 449 u32 block[4]; 450 u32 * Km; 451 u8 * Kr; 452 453 - block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; 454 - block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; 455 - block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; 456 - block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; 457 458 Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); 459 Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); ··· 469 Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); 470 Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); 471 Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); 472 - 473 - outbuf[0] = (block[0] >> 24) & 0xff; 474 - outbuf[1] = (block[0] >> 16) & 0xff; 475 - outbuf[2] = (block[0] >> 8) & 0xff; 476 - outbuf[3] = block[0] & 0xff; 477 - outbuf[4] = (block[1] >> 24) & 0xff; 478 - outbuf[5] = (block[1] >> 16) & 0xff; 479 - outbuf[6] = (block[1] >> 8) & 0xff; 480 - outbuf[7] = block[1] & 0xff; 481 - outbuf[8] = (block[2] >> 24) & 0xff; 482 - outbuf[9] = (block[2] >> 16) & 0xff; 483 - outbuf[10] = (block[2] >> 8) & 0xff; 484 - outbuf[11] = block[2] & 0xff; 485 - outbuf[12] = (block[3] >> 24) & 0xff; 486 - outbuf[13] = (block[3] >> 16) & 0xff; 487 - outbuf[14] = (block[3] >> 8) & 0xff; 488 - outbuf[15] = block[3] & 0xff; 489 } 490 491 static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { 492 struct cast6_ctx * c = (struct cast6_ctx *)ctx; 493 u32 block[4]; 494 u32 * Km; 495 u8 * Kr; 496 497 - block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3]; 498 - block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7]; 499 - block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11]; 500 - block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15]; 501 502 Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); 503 Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); ··· 502 Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); 503 Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); 504 505 - outbuf[0] = (block[0] >> 24) & 0xff; 506 - outbuf[1] = (block[0] >> 16) & 0xff; 507 - outbuf[2] = (block[0] >> 8) & 0xff; 508 - outbuf[3] = block[0] & 0xff; 509 - outbuf[4] = (block[1] >> 24) & 0xff; 510 - outbuf[5] = (block[1] >> 16) & 0xff; 511 - outbuf[6] = (block[1] >> 8) & 0xff; 512 - outbuf[7] = block[1] & 0xff; 513 - outbuf[8] = (block[2] >> 24) & 0xff; 514 - outbuf[9] = (block[2] >> 16) & 0xff; 515 - outbuf[10] = (block[2] >> 8) & 0xff; 516 - outbuf[11] = block[2] & 0xff; 517 - outbuf[12] = (block[3] >> 24) & 0xff; 518 - outbuf[13] = (block[3] >> 16) & 0xff; 519 - outbuf[14] = (block[3] >> 8) & 0xff; 520 - outbuf[15] = block[3] & 0xff; 521 } 522 523 static struct crypto_alg alg = { ··· 513 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 514 .cra_blocksize = CAST6_BLOCK_SIZE, 515 .cra_ctxsize = sizeof(struct cast6_ctx), 516 .cra_module = THIS_MODULE, 517 .cra_list = LIST_HEAD_INIT(alg.cra_list), 518 .cra_u = {
··· 18 */ 19 20 21 + #include <asm/byteorder.h> 22 #include <linux/init.h> 23 #include <linux/crypto.h> 24 #include <linux/module.h> 25 #include <linux/errno.h> 26 #include <linux/string.h> 27 + #include <linux/types.h> 28 29 #define CAST6_BLOCK_SIZE 16 30 #define CAST6_MIN_KEY_SIZE 16 ··· 384 { 385 int i; 386 u32 key[8]; 387 + __be32 p_key[8]; /* padded key */ 388 struct cast6_ctx *c = (struct cast6_ctx *) ctx; 389 390 if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { ··· 395 memset (p_key, 0, 32); 396 memcpy (p_key, in_key, key_len); 397 398 + key[0] = be32_to_cpu(p_key[0]); /* A */ 399 + key[1] = be32_to_cpu(p_key[1]); /* B */ 400 + key[2] = be32_to_cpu(p_key[2]); /* C */ 401 + key[3] = be32_to_cpu(p_key[3]); /* D */ 402 + key[4] = be32_to_cpu(p_key[4]); /* E */ 403 + key[5] = be32_to_cpu(p_key[5]); /* F */ 404 + key[6] = be32_to_cpu(p_key[6]); /* G */ 405 + key[7] = be32_to_cpu(p_key[7]); /* H */ 406 407 408 ··· 444 445 static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { 446 struct cast6_ctx * c = (struct cast6_ctx *)ctx; 447 + const __be32 *src = (const __be32 *)inbuf; 448 + __be32 *dst = (__be32 *)outbuf; 449 u32 block[4]; 450 u32 * Km; 451 u8 * Kr; 452 453 + block[0] = be32_to_cpu(src[0]); 454 + block[1] = be32_to_cpu(src[1]); 455 + block[2] = be32_to_cpu(src[2]); 456 + block[3] = be32_to_cpu(src[3]); 457 458 Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km); 459 Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km); ··· 465 Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km); 466 Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km); 467 Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km); 468 + 469 + dst[0] = cpu_to_be32(block[0]); 470 + dst[1] = cpu_to_be32(block[1]); 471 + dst[2] = cpu_to_be32(block[2]); 472 + dst[3] = cpu_to_be32(block[3]); 473 } 474 475 static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { 476 struct cast6_ctx * c = (struct cast6_ctx *)ctx; 477 + const __be32 *src = (const __be32 *)inbuf; 478 + __be32 *dst = (__be32 *)outbuf; 479 u32 block[4]; 480 u32 * Km; 481 u8 * Kr; 482 483 + block[0] = be32_to_cpu(src[0]); 484 + block[1] = be32_to_cpu(src[1]); 485 + block[2] = be32_to_cpu(src[2]); 486 + block[3] = be32_to_cpu(src[3]); 487 488 Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km); 489 Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km); ··· 508 Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km); 509 Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); 510 511 + dst[0] = cpu_to_be32(block[0]); 512 + dst[1] = cpu_to_be32(block[1]); 513 + dst[2] = cpu_to_be32(block[2]); 514 + dst[3] = cpu_to_be32(block[3]); 515 } 516 517 static struct crypto_alg alg = { ··· 531 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 532 .cra_blocksize = CAST6_BLOCK_SIZE, 533 .cra_ctxsize = sizeof(struct cast6_ctx), 534 + .cra_alignmask = 3, 535 .cra_module = THIS_MODULE, 536 .cra_list = LIST_HEAD_INIT(alg.cra_list), 537 .cra_u = {
+3 -2
crypto/cipher.c
··· 212 struct crypto_tfm *tfm = desc->tfm; 213 void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; 214 int bsize = crypto_tfm_alg_blocksize(tfm); 215 216 - u8 stack[src == dst ? bsize : 0]; 217 - u8 *buf = stack; 218 u8 **dst_p = src == dst ? &buf : &dst; 219 220 void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
··· 212 struct crypto_tfm *tfm = desc->tfm; 213 void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; 214 int bsize = crypto_tfm_alg_blocksize(tfm); 215 + unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm); 216 217 + u8 stack[src == dst ? bsize + alignmask : 0]; 218 + u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); 219 u8 **dst_p = src == dst ? &buf : &dst; 220 221 void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
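Because ciphers may now declare an alignment requirement, the in-place path above can no longer hand the cipher an arbitrarily aligned stack buffer: the scratch block is over-allocated by alignmask bytes and ALIGN() rounds the pointer up to the next (alignmask + 1) boundary. For a power-of-two boundary that rounding is just an add-and-mask; a small illustrative helper (hypothetical name, not patch code):

    /* What ALIGN((unsigned long)stack, alignmask + 1) computes when
     * alignmask + 1 is a power of two; e.g. alignmask = 3 rounds the
     * address up to the next multiple of 4. */
    static u8 *align_up(u8 *p, unsigned long alignmask)
    {
            return (u8 *)(((unsigned long)p + alignmask) & ~alignmask);
    }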
+1
crypto/crc32c.c
··· 16 #include <linux/string.h> 17 #include <linux/crypto.h> 18 #include <linux/crc32c.h> 19 #include <asm/byteorder.h> 20 21 #define CHKSUM_BLOCK_SIZE 32
··· 16 #include <linux/string.h> 17 #include <linux/crypto.h> 18 #include <linux/crc32c.h> 19 + #include <linux/types.h> 20 #include <asm/byteorder.h> 21 22 #define CHKSUM_BLOCK_SIZE 32
+3
crypto/des.c
··· 12 * 13 */ 14 15 #include <linux/bitops.h> 16 #include <linux/init.h> 17 #include <linux/module.h> 18 #include <linux/errno.h> 19 #include <linux/crypto.h> 20 21 #define DES_KEY_SIZE 8 22 #define DES_EXPKEY_WORDS 32 ··· 949 .cra_blocksize = DES_BLOCK_SIZE, 950 .cra_ctxsize = sizeof(struct des_ctx), 951 .cra_module = THIS_MODULE, 952 .cra_list = LIST_HEAD_INIT(des_alg.cra_list), 953 .cra_u = { .cipher = { 954 .cia_min_keysize = DES_KEY_SIZE,
··· 12 * 13 */ 14 15 + #include <asm/byteorder.h> 16 #include <linux/bitops.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/errno.h> 20 #include <linux/crypto.h> 21 + #include <linux/types.h> 22 23 #define DES_KEY_SIZE 8 24 #define DES_EXPKEY_WORDS 32 ··· 947 .cra_blocksize = DES_BLOCK_SIZE, 948 .cra_ctxsize = sizeof(struct des_ctx), 949 .cra_module = THIS_MODULE, 950 + .cra_alignmask = 3, 951 .cra_list = LIST_HEAD_INIT(des_alg.cra_list), 952 .cra_u = { .cipher = { 953 .cia_min_keysize = DES_KEY_SIZE,
+6
crypto/internal.h
··· 2 * Cryptographic API. 3 * 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free ··· 17 #include <linux/highmem.h> 18 #include <linux/interrupt.h> 19 #include <linux/init.h> 20 #include <linux/kernel.h> 21 #include <linux/slab.h> 22 #include <asm/kmap_types.h> 23 24 extern enum km_type crypto_km_types[]; 25
··· 2 * Cryptographic API. 3 * 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License as published by the Free ··· 16 #include <linux/highmem.h> 17 #include <linux/interrupt.h> 18 #include <linux/init.h> 19 + #include <linux/list.h> 20 #include <linux/kernel.h> 21 + #include <linux/rwsem.h> 22 #include <linux/slab.h> 23 #include <asm/kmap_types.h> 24 + 25 + extern struct list_head crypto_alg_list; 26 + extern struct rw_semaphore crypto_alg_sem; 27 28 extern enum km_type crypto_km_types[]; 29
+10 -36
crypto/khazad.c
··· 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/mm.h> 25 #include <asm/scatterlist.h> 26 #include <linux/crypto.h> 27 28 #define KHAZAD_KEY_SIZE 16 29 #define KHAZAD_BLOCK_SIZE 8 ··· 757 static int khazad_setkey(void *ctx_arg, const u8 *in_key, 758 unsigned int key_len, u32 *flags) 759 { 760 - 761 struct khazad_ctx *ctx = ctx_arg; 762 int r; 763 const u64 *S = T7; 764 u64 K2, K1; ··· 769 return -EINVAL; 770 } 771 772 - K2 = ((u64)in_key[ 0] << 56) ^ 773 - ((u64)in_key[ 1] << 48) ^ 774 - ((u64)in_key[ 2] << 40) ^ 775 - ((u64)in_key[ 3] << 32) ^ 776 - ((u64)in_key[ 4] << 24) ^ 777 - ((u64)in_key[ 5] << 16) ^ 778 - ((u64)in_key[ 6] << 8) ^ 779 - ((u64)in_key[ 7] ); 780 - K1 = ((u64)in_key[ 8] << 56) ^ 781 - ((u64)in_key[ 9] << 48) ^ 782 - ((u64)in_key[10] << 40) ^ 783 - ((u64)in_key[11] << 32) ^ 784 - ((u64)in_key[12] << 24) ^ 785 - ((u64)in_key[13] << 16) ^ 786 - ((u64)in_key[14] << 8) ^ 787 - ((u64)in_key[15] ); 788 789 /* setup the encrypt key */ 790 for (r = 0; r <= KHAZAD_ROUNDS; r++) { ··· 808 static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], 809 u8 *ciphertext, const u8 *plaintext) 810 { 811 - 812 int r; 813 u64 state; 814 815 - state = ((u64)plaintext[0] << 56) ^ 816 - ((u64)plaintext[1] << 48) ^ 817 - ((u64)plaintext[2] << 40) ^ 818 - ((u64)plaintext[3] << 32) ^ 819 - ((u64)plaintext[4] << 24) ^ 820 - ((u64)plaintext[5] << 16) ^ 821 - ((u64)plaintext[6] << 8) ^ 822 - ((u64)plaintext[7] ) ^ 823 - roundKey[0]; 824 825 for (r = 1; r < KHAZAD_ROUNDS; r++) { 826 state = T0[(int)(state >> 56) ] ^ ··· 837 (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ 838 roundKey[KHAZAD_ROUNDS]; 839 840 - ciphertext[0] = (u8)(state >> 56); 841 - ciphertext[1] = (u8)(state >> 48); 842 - ciphertext[2] = (u8)(state >> 40); 843 - ciphertext[3] = (u8)(state >> 32); 844 - ciphertext[4] = (u8)(state >> 24); 845 - ciphertext[5] = (u8)(state >> 16); 846 - ciphertext[6] = (u8)(state >> 8); 847 - ciphertext[7] = (u8)(state ); 848 - 849 } 850 851 static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) ··· 857 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 858 .cra_blocksize = KHAZAD_BLOCK_SIZE, 859 .cra_ctxsize = sizeof (struct khazad_ctx), 860 .cra_module = THIS_MODULE, 861 .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list), 862 .cra_u = { .cipher = {
··· 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/mm.h> 25 + #include <asm/byteorder.h> 26 #include <asm/scatterlist.h> 27 #include <linux/crypto.h> 28 + #include <linux/types.h> 29 30 #define KHAZAD_KEY_SIZE 16 31 #define KHAZAD_BLOCK_SIZE 8 ··· 755 static int khazad_setkey(void *ctx_arg, const u8 *in_key, 756 unsigned int key_len, u32 *flags) 757 { 758 struct khazad_ctx *ctx = ctx_arg; 759 + const __be64 *key = (const __be64 *)in_key; 760 int r; 761 const u64 *S = T7; 762 u64 K2, K1; ··· 767 return -EINVAL; 768 } 769 770 + K2 = be64_to_cpu(key[0]); 771 + K1 = be64_to_cpu(key[1]); 772 773 /* setup the encrypt key */ 774 for (r = 0; r <= KHAZAD_ROUNDS; r++) { ··· 820 static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], 821 u8 *ciphertext, const u8 *plaintext) 822 { 823 + const __be64 *src = (const __be64 *)plaintext; 824 + __be64 *dst = (__be64 *)ciphertext; 825 int r; 826 u64 state; 827 828 + state = be64_to_cpu(*src) ^ roundKey[0]; 829 830 for (r = 1; r < KHAZAD_ROUNDS; r++) { 831 state = T0[(int)(state >> 56) ] ^ ··· 856 (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ 857 roundKey[KHAZAD_ROUNDS]; 858 859 + *dst = cpu_to_be64(state); 860 } 861 862 static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) ··· 884 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 885 .cra_blocksize = KHAZAD_BLOCK_SIZE, 886 .cra_ctxsize = sizeof (struct khazad_ctx), 887 + .cra_alignmask = 7, 888 .cra_module = THIS_MODULE, 889 .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list), 890 .cra_u = { .cipher = {
+1
crypto/md4.c
··· 24 #include <linux/crypto.h> 25 #include <linux/kernel.h> 26 #include <linux/string.h> 27 #include <asm/byteorder.h> 28 29 #define MD4_DIGEST_SIZE 16
··· 24 #include <linux/crypto.h> 25 #include <linux/kernel.h> 26 #include <linux/string.h> 27 + #include <linux/types.h> 28 #include <asm/byteorder.h> 29 30 #define MD4_DIGEST_SIZE 16
+1
crypto/md5.c
··· 19 #include <linux/module.h> 20 #include <linux/string.h> 21 #include <linux/crypto.h> 22 #include <asm/byteorder.h> 23 24 #define MD5_DIGEST_SIZE 16
··· 19 #include <linux/module.h> 20 #include <linux/string.h> 21 #include <linux/crypto.h> 22 + #include <linux/types.h> 23 #include <asm/byteorder.h> 24 25 #define MD5_DIGEST_SIZE 16
+17 -23
crypto/michael_mic.c
··· 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/init.h> 14 #include <linux/module.h> 15 #include <linux/string.h> 16 #include <linux/crypto.h> 17 18 19 struct michael_mic_ctx { ··· 45 } while (0) 46 47 48 - static inline u32 get_le32(const u8 *p) 49 - { 50 - return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24); 51 - } 52 - 53 - 54 - static inline void put_le32(u8 *p, u32 v) 55 - { 56 - p[0] = v; 57 - p[1] = v >> 8; 58 - p[2] = v >> 16; 59 - p[3] = v >> 24; 60 - } 61 - 62 - 63 static void michael_init(void *ctx) 64 { 65 struct michael_mic_ctx *mctx = ctx; ··· 55 static void michael_update(void *ctx, const u8 *data, unsigned int len) 56 { 57 struct michael_mic_ctx *mctx = ctx; 58 59 if (mctx->pending_len) { 60 int flen = 4 - mctx->pending_len; ··· 69 if (mctx->pending_len < 4) 70 return; 71 72 - mctx->l ^= get_le32(mctx->pending); 73 michael_block(mctx->l, mctx->r); 74 mctx->pending_len = 0; 75 } 76 77 while (len >= 4) { 78 - mctx->l ^= get_le32(data); 79 michael_block(mctx->l, mctx->r); 80 - data += 4; 81 len -= 4; 82 } 83 84 if (len > 0) { 85 mctx->pending_len = len; 86 - memcpy(mctx->pending, data, len); 87 } 88 } 89 ··· 94 { 95 struct michael_mic_ctx *mctx = ctx; 96 u8 *data = mctx->pending; 97 98 /* Last block and padding (0x5a, 4..7 x 0) */ 99 switch (mctx->pending_len) { ··· 116 /* l ^= 0; */ 117 michael_block(mctx->l, mctx->r); 118 119 - put_le32(out, mctx->l); 120 - put_le32(out + 4, mctx->r); 121 } 122 123 ··· 125 u32 *flags) 126 { 127 struct michael_mic_ctx *mctx = ctx; 128 if (keylen != 8) { 129 if (flags) 130 *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; 131 return -EINVAL; 132 } 133 - mctx->l = get_le32(key); 134 - mctx->r = get_le32(key + 4); 135 return 0; 136 } 137
··· 10 * published by the Free Software Foundation. 11 */ 12 13 + #include <asm/byteorder.h> 14 #include <linux/init.h> 15 #include <linux/module.h> 16 #include <linux/string.h> 17 #include <linux/crypto.h> 18 + #include <linux/types.h> 19 20 21 struct michael_mic_ctx { ··· 43 } while (0) 44 45 46 static void michael_init(void *ctx) 47 { 48 struct michael_mic_ctx *mctx = ctx; ··· 68 static void michael_update(void *ctx, const u8 *data, unsigned int len) 69 { 70 struct michael_mic_ctx *mctx = ctx; 71 + const __le32 *src; 72 73 if (mctx->pending_len) { 74 int flen = 4 - mctx->pending_len; ··· 81 if (mctx->pending_len < 4) 82 return; 83 84 + src = (const __le32 *)mctx->pending; 85 + mctx->l ^= le32_to_cpup(src); 86 michael_block(mctx->l, mctx->r); 87 mctx->pending_len = 0; 88 } 89 90 + src = (const __le32 *)data; 91 + 92 while (len >= 4) { 93 + mctx->l ^= le32_to_cpup(src++); 94 michael_block(mctx->l, mctx->r); 95 len -= 4; 96 } 97 98 if (len > 0) { 99 mctx->pending_len = len; 100 + memcpy(mctx->pending, src, len); 101 } 102 } 103 ··· 104 { 105 struct michael_mic_ctx *mctx = ctx; 106 u8 *data = mctx->pending; 107 + __le32 *dst = (__le32 *)out; 108 109 /* Last block and padding (0x5a, 4..7 x 0) */ 110 switch (mctx->pending_len) { ··· 125 /* l ^= 0; */ 126 michael_block(mctx->l, mctx->r); 127 128 + dst[0] = cpu_to_le32(mctx->l); 129 + dst[1] = cpu_to_le32(mctx->r); 130 } 131 132 ··· 134 u32 *flags) 135 { 136 struct michael_mic_ctx *mctx = ctx; 137 + const __le32 *data = (const __le32 *)key; 138 + 139 if (keylen != 8) { 140 if (flags) 141 *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; 142 return -EINVAL; 143 } 144 + 145 + mctx->l = le32_to_cpu(data[0]); 146 + mctx->r = le32_to_cpu(data[1]); 147 return 0; 148 } 149
+3 -3
crypto/proc.c
··· 4 * Procfs information. 5 * 6 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License as published by the Free ··· 18 #include <linux/proc_fs.h> 19 #include <linux/seq_file.h> 20 #include "internal.h" 21 - 22 - extern struct list_head crypto_alg_list; 23 - extern struct rw_semaphore crypto_alg_sem; 24 25 static void *c_start(struct seq_file *m, loff_t *pos) 26 { ··· 51 struct crypto_alg *alg = (struct crypto_alg *)p; 52 53 seq_printf(m, "name : %s\n", alg->cra_name); 54 seq_printf(m, "module : %s\n", module_name(alg->cra_module)); 55 56 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 57 case CRYPTO_ALG_TYPE_CIPHER:
··· 4 * Procfs information. 5 * 6 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 7 + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the Free ··· 17 #include <linux/proc_fs.h> 18 #include <linux/seq_file.h> 19 #include "internal.h" 20 21 static void *c_start(struct seq_file *m, loff_t *pos) 22 { ··· 53 struct crypto_alg *alg = (struct crypto_alg *)p; 54 55 seq_printf(m, "name : %s\n", alg->cra_name); 56 + seq_printf(m, "driver : %s\n", alg->cra_driver_name); 57 seq_printf(m, "module : %s\n", module_name(alg->cra_module)); 58 + seq_printf(m, "priority : %d\n", alg->cra_priority); 59 60 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 61 case CRYPTO_ALG_TYPE_CIPHER:
+2
crypto/serpent.c
··· 20 #include <linux/errno.h> 21 #include <asm/byteorder.h> 22 #include <linux/crypto.h> 23 24 /* Key is padded to the maximum of 256 bits before round key generation. 25 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm. ··· 553 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 554 .cra_blocksize = SERPENT_BLOCK_SIZE, 555 .cra_ctxsize = sizeof(struct serpent_ctx), 556 .cra_module = THIS_MODULE, 557 .cra_list = LIST_HEAD_INIT(serpent_alg.cra_list), 558 .cra_u = { .cipher = {
··· 20 #include <linux/errno.h> 21 #include <asm/byteorder.h> 22 #include <linux/crypto.h> 23 + #include <linux/types.h> 24 25 /* Key is padded to the maximum of 256 bits before round key generation. 26 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm. ··· 552 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 553 .cra_blocksize = SERPENT_BLOCK_SIZE, 554 .cra_ctxsize = sizeof(struct serpent_ctx), 555 + .cra_alignmask = 3, 556 .cra_module = THIS_MODULE, 557 .cra_list = LIST_HEAD_INIT(serpent_alg.cra_list), 558 .cra_u = { .cipher = {
+32 -34
crypto/sha1.c
··· 21 #include <linux/mm.h> 22 #include <linux/crypto.h> 23 #include <linux/cryptohash.h> 24 #include <asm/scatterlist.h> 25 #include <asm/byteorder.h> 26 ··· 49 static void sha1_update(void *ctx, const u8 *data, unsigned int len) 50 { 51 struct sha1_ctx *sctx = ctx; 52 - unsigned int i, j; 53 - u32 temp[SHA_WORKSPACE_WORDS]; 54 55 - j = (sctx->count >> 3) & 0x3f; 56 - sctx->count += len << 3; 57 58 - if ((j + len) > 63) { 59 - memcpy(&sctx->buffer[j], data, (i = 64-j)); 60 - sha_transform(sctx->state, sctx->buffer, temp); 61 - for ( ; i + 63 < len; i += 64) { 62 - sha_transform(sctx->state, &data[i], temp); 63 } 64 - j = 0; 65 } 66 - else i = 0; 67 - memset(temp, 0, sizeof(temp)); 68 - memcpy(&sctx->buffer[j], &data[i], len - i); 69 } 70 71 ··· 83 static void sha1_final(void* ctx, u8 *out) 84 { 85 struct sha1_ctx *sctx = ctx; 86 - u32 i, j, index, padlen; 87 - u64 t; 88 - u8 bits[8] = { 0, }; 89 static const u8 padding[64] = { 0x80, }; 90 91 - t = sctx->count; 92 - bits[7] = 0xff & t; t>>=8; 93 - bits[6] = 0xff & t; t>>=8; 94 - bits[5] = 0xff & t; t>>=8; 95 - bits[4] = 0xff & t; t>>=8; 96 - bits[3] = 0xff & t; t>>=8; 97 - bits[2] = 0xff & t; t>>=8; 98 - bits[1] = 0xff & t; t>>=8; 99 - bits[0] = 0xff & t; 100 101 /* Pad out to 56 mod 64 */ 102 - index = (sctx->count >> 3) & 0x3f; 103 padlen = (index < 56) ? (56 - index) : ((64+56) - index); 104 sha1_update(sctx, padding, padlen); 105 106 /* Append length */ 107 - sha1_update(sctx, bits, sizeof bits); 108 109 /* Store state in digest */ 110 - for (i = j = 0; i < 5; i++, j += 4) { 111 - u32 t2 = sctx->state[i]; 112 - out[j+3] = t2 & 0xff; t2>>=8; 113 - out[j+2] = t2 & 0xff; t2>>=8; 114 - out[j+1] = t2 & 0xff; t2>>=8; 115 - out[j ] = t2 & 0xff; 116 - } 117 118 /* Wipe context */ 119 memset(sctx, 0, sizeof *sctx);
··· 21 #include <linux/mm.h> 22 #include <linux/crypto.h> 23 #include <linux/cryptohash.h> 24 + #include <linux/types.h> 25 #include <asm/scatterlist.h> 26 #include <asm/byteorder.h> 27 ··· 48 static void sha1_update(void *ctx, const u8 *data, unsigned int len) 49 { 50 struct sha1_ctx *sctx = ctx; 51 + unsigned int partial, done; 52 + const u8 *src; 53 54 + partial = sctx->count & 0x3f; 55 + sctx->count += len; 56 + done = 0; 57 + src = data; 58 59 + if ((partial + len) > 63) { 60 + u32 temp[SHA_WORKSPACE_WORDS]; 61 + 62 + if (partial) { 63 + done = -partial; 64 + memcpy(sctx->buffer + partial, data, done + 64); 65 + src = sctx->buffer; 66 } 67 + 68 + do { 69 + sha_transform(sctx->state, src, temp); 70 + done += 64; 71 + src = data + done; 72 + } while (done + 63 < len); 73 + 74 + memset(temp, 0, sizeof(temp)); 75 + partial = 0; 76 } 77 + memcpy(sctx->buffer + partial, src, len - done); 78 } 79 80 ··· 72 static void sha1_final(void* ctx, u8 *out) 73 { 74 struct sha1_ctx *sctx = ctx; 75 + __be32 *dst = (__be32 *)out; 76 + u32 i, index, padlen; 77 + __be64 bits; 78 static const u8 padding[64] = { 0x80, }; 79 80 + bits = cpu_to_be64(sctx->count << 3); 81 82 /* Pad out to 56 mod 64 */ 83 + index = sctx->count & 0x3f; 84 padlen = (index < 56) ? (56 - index) : ((64+56) - index); 85 sha1_update(sctx, padding, padlen); 86 87 /* Append length */ 88 + sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); 89 90 /* Store state in digest */ 91 + for (i = 0; i < 5; i++) 92 + dst[i] = cpu_to_be32(sctx->state[i]); 93 94 /* Wipe context */ 95 memset(sctx, 0, sizeof *sctx);
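The rewritten sha1_update() keeps sctx->count in bytes (the shift to a bit count moves into sha1_final()) and tracks the buffered remainder of the previous call in "partial". The slightly unusual negative "done" lets the same expressions cover both the partially filled first block and the following full blocks. A worked trace of the bookkeeping, with illustrative numbers only:

    /* Assume 16 bytes are already buffered (partial = 16) and this
     * call supplies len = 130 bytes of data:
     *
     *   done = -partial = -16
     *   memcpy(buffer + 16, data, done + 64)    48 bytes finish block 1
     *   sha_transform(state, buffer, temp)      done = 48, src = data + 48
     *   sha_transform(state, data + 48, temp)   done = 112, src = data + 112
     *   loop stops: done + 63 = 175 is not < len
     *   memcpy(buffer + 0, src, len - done)     18 bytes stay buffered
     *
     * 48 + 64 bytes went through sha_transform() and 18 remain pending,
     * accounting for all 130 input bytes. */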
+10 -21
crypto/sha256.c
··· 20 #include <linux/module.h> 21 #include <linux/mm.h> 22 #include <linux/crypto.h> 23 #include <asm/scatterlist.h> 24 #include <asm/byteorder.h> 25 ··· 280 static void sha256_final(void* ctx, u8 *out) 281 { 282 struct sha256_ctx *sctx = ctx; 283 - u8 bits[8]; 284 - unsigned int index, pad_len, t; 285 - int i, j; 286 static const u8 padding[64] = { 0x80, }; 287 288 /* Save number of bits */ 289 - t = sctx->count[0]; 290 - bits[7] = t; t >>= 8; 291 - bits[6] = t; t >>= 8; 292 - bits[5] = t; t >>= 8; 293 - bits[4] = t; 294 - t = sctx->count[1]; 295 - bits[3] = t; t >>= 8; 296 - bits[2] = t; t >>= 8; 297 - bits[1] = t; t >>= 8; 298 - bits[0] = t; 299 300 /* Pad out to 56 mod 64. */ 301 index = (sctx->count[0] >> 3) & 0x3f; ··· 296 sha256_update(sctx, padding, pad_len); 297 298 /* Append length (before padding) */ 299 - sha256_update(sctx, bits, 8); 300 301 /* Store state in digest */ 302 - for (i = j = 0; i < 8; i++, j += 4) { 303 - t = sctx->state[i]; 304 - out[j+3] = t; t >>= 8; 305 - out[j+2] = t; t >>= 8; 306 - out[j+1] = t; t >>= 8; 307 - out[j ] = t; 308 - } 309 310 /* Zeroize sensitive information. */ 311 memset(sctx, 0, sizeof(*sctx));
··· 20 #include <linux/module.h> 21 #include <linux/mm.h> 22 #include <linux/crypto.h> 23 + #include <linux/types.h> 24 #include <asm/scatterlist.h> 25 #include <asm/byteorder.h> 26 ··· 279 static void sha256_final(void* ctx, u8 *out) 280 { 281 struct sha256_ctx *sctx = ctx; 282 + __be32 *dst = (__be32 *)out; 283 + __be32 bits[2]; 284 + unsigned int index, pad_len; 285 + int i; 286 static const u8 padding[64] = { 0x80, }; 287 288 /* Save number of bits */ 289 + bits[1] = cpu_to_be32(sctx->count[0]); 290 + bits[0] = cpu_to_be32(sctx->count[1]); 291 292 /* Pad out to 56 mod 64. */ 293 index = (sctx->count[0] >> 3) & 0x3f; ··· 302 sha256_update(sctx, padding, pad_len); 303 304 /* Append length (before padding) */ 305 + sha256_update(sctx, (const u8 *)bits, sizeof(bits)); 306 307 /* Store state in digest */ 308 + for (i = 0; i < 8; i++) 309 + dst[i] = cpu_to_be32(sctx->state[i]); 310 311 /* Zeroize sensitive information. */ 312 memset(sctx, 0, sizeof(*sctx));
+12 -42
crypto/sha512.c
··· 17 #include <linux/mm.h> 18 #include <linux/init.h> 19 #include <linux/crypto.h> 20 21 #include <asm/scatterlist.h> 22 #include <asm/byteorder.h> ··· 236 sha512_final(void *ctx, u8 *hash) 237 { 238 struct sha512_ctx *sctx = ctx; 239 - 240 static u8 padding[128] = { 0x80, }; 241 - 242 - u32 t; 243 - u64 t2; 244 - u8 bits[128]; 245 unsigned int index, pad_len; 246 - int i, j; 247 - 248 - index = pad_len = t = i = j = 0; 249 - t2 = 0; 250 251 /* Save number of bits */ 252 - t = sctx->count[0]; 253 - bits[15] = t; t>>=8; 254 - bits[14] = t; t>>=8; 255 - bits[13] = t; t>>=8; 256 - bits[12] = t; 257 - t = sctx->count[1]; 258 - bits[11] = t; t>>=8; 259 - bits[10] = t; t>>=8; 260 - bits[9 ] = t; t>>=8; 261 - bits[8 ] = t; 262 - t = sctx->count[2]; 263 - bits[7 ] = t; t>>=8; 264 - bits[6 ] = t; t>>=8; 265 - bits[5 ] = t; t>>=8; 266 - bits[4 ] = t; 267 - t = sctx->count[3]; 268 - bits[3 ] = t; t>>=8; 269 - bits[2 ] = t; t>>=8; 270 - bits[1 ] = t; t>>=8; 271 - bits[0 ] = t; 272 273 /* Pad out to 112 mod 128. */ 274 index = (sctx->count[0] >> 3) & 0x7f; ··· 254 sha512_update(sctx, padding, pad_len); 255 256 /* Append length (before padding) */ 257 - sha512_update(sctx, bits, 16); 258 259 /* Store state in digest */ 260 - for (i = j = 0; i < 8; i++, j += 8) { 261 - t2 = sctx->state[i]; 262 - hash[j+7] = (char)t2 & 0xff; t2>>=8; 263 - hash[j+6] = (char)t2 & 0xff; t2>>=8; 264 - hash[j+5] = (char)t2 & 0xff; t2>>=8; 265 - hash[j+4] = (char)t2 & 0xff; t2>>=8; 266 - hash[j+3] = (char)t2 & 0xff; t2>>=8; 267 - hash[j+2] = (char)t2 & 0xff; t2>>=8; 268 - hash[j+1] = (char)t2 & 0xff; t2>>=8; 269 - hash[j ] = (char)t2 & 0xff; 270 - } 271 - 272 /* Zeroize sensitive information. */ 273 memset(sctx, 0, sizeof(struct sha512_ctx)); 274 }
··· 17 #include <linux/mm.h> 18 #include <linux/init.h> 19 #include <linux/crypto.h> 20 + #include <linux/types.h> 21 22 #include <asm/scatterlist.h> 23 #include <asm/byteorder.h> ··· 235 sha512_final(void *ctx, u8 *hash) 236 { 237 struct sha512_ctx *sctx = ctx; 238 static u8 padding[128] = { 0x80, }; 239 + __be64 *dst = (__be64 *)hash; 240 + __be32 bits[4]; 241 unsigned int index, pad_len; 242 + int i; 243 244 /* Save number of bits */ 245 + bits[3] = cpu_to_be32(sctx->count[0]); 246 + bits[2] = cpu_to_be32(sctx->count[1]); 247 + bits[1] = cpu_to_be32(sctx->count[2]); 248 + bits[0] = cpu_to_be32(sctx->count[3]); 249 250 /* Pad out to 112 mod 128. */ 251 index = (sctx->count[0] >> 3) & 0x7f; ··· 275 sha512_update(sctx, padding, pad_len); 276 277 /* Append length (before padding) */ 278 + sha512_update(sctx, (const u8 *)bits, sizeof(bits)); 279 280 /* Store state in digest */ 281 + for (i = 0; i < 8; i++) 282 + dst[i] = cpu_to_be64(sctx->state[i]); 283 + 284 /* Zeroize sensitive information. */ 285 memset(sctx, 0, sizeof(struct sha512_ctx)); 286 }
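The sha1, sha256 and sha512 finalisation hunks above all replace hand-rolled byte packing with typed __be32/__be64 stores followed by cpu_to_be32()/cpu_to_be64(). A minimal user-space sketch of why the two forms are equivalent (assuming a glibc-style <endian.h> that provides htobe32(); in the kernel cpu_to_be32 plays that role):

	#include <endian.h>	/* htobe32(); glibc-style extension, assumed available */
	#include <stdint.h>
	#include <string.h>
	#include <assert.h>

	int main(void)
	{
		uint32_t state = 0x01020304;
		uint8_t by_hand[4], by_word[4];
		uint32_t be;

		/* old style: peel the word apart byte by byte, most significant first */
		by_hand[0] = state >> 24;
		by_hand[1] = state >> 16;
		by_hand[2] = state >> 8;
		by_hand[3] = state;

		/* new style: convert once (cpu_to_be32 in the kernel), store the whole word */
		be = htobe32(state);
		memcpy(by_word, &be, sizeof(be));

		assert(memcmp(by_hand, by_word, sizeof(by_hand)) == 0);
		return 0;
	}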
+51 -47
crypto/tea.c
··· 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/mm.h> 25 #include <asm/scatterlist.h> 26 #include <linux/crypto.h> 27 28 #define TEA_KEY_SIZE 16 29 #define TEA_BLOCK_SIZE 8 ··· 36 #define XTEA_BLOCK_SIZE 8 37 #define XTEA_ROUNDS 32 38 #define XTEA_DELTA 0x9e3779b9 39 - 40 - #define u32_in(x) le32_to_cpu(*(const __le32 *)(x)) 41 - #define u32_out(to, from) (*(__le32 *)(to) = cpu_to_le32(from)) 42 43 struct tea_ctx { 44 u32 KEY[4]; ··· 48 static int tea_setkey(void *ctx_arg, const u8 *in_key, 49 unsigned int key_len, u32 *flags) 50 { 51 - 52 struct tea_ctx *ctx = ctx_arg; 53 54 if (key_len != 16) 55 { ··· 57 return -EINVAL; 58 } 59 60 - ctx->KEY[0] = u32_in (in_key); 61 - ctx->KEY[1] = u32_in (in_key + 4); 62 - ctx->KEY[2] = u32_in (in_key + 8); 63 - ctx->KEY[3] = u32_in (in_key + 12); 64 65 return 0; 66 ··· 72 u32 k0, k1, k2, k3; 73 74 struct tea_ctx *ctx = ctx_arg; 75 76 - y = u32_in (src); 77 - z = u32_in (src + 4); 78 79 k0 = ctx->KEY[0]; 80 k1 = ctx->KEY[1]; ··· 91 z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); 92 } 93 94 - u32_out (dst, y); 95 - u32_out (dst + 4, z); 96 } 97 98 static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 99 { 100 u32 y, z, n, sum; 101 u32 k0, k1, k2, k3; 102 - 103 struct tea_ctx *ctx = ctx_arg; 104 105 - y = u32_in (src); 106 - z = u32_in (src + 4); 107 108 k0 = ctx->KEY[0]; 109 k1 = ctx->KEY[1]; ··· 121 sum -= TEA_DELTA; 122 } 123 124 - u32_out (dst, y); 125 - u32_out (dst + 4, z); 126 - 127 } 128 129 static int xtea_setkey(void *ctx_arg, const u8 *in_key, 130 unsigned int key_len, u32 *flags) 131 { 132 - 133 struct xtea_ctx *ctx = ctx_arg; 134 135 if (key_len != 16) 136 { ··· 137 return -EINVAL; 138 } 139 140 - ctx->KEY[0] = u32_in (in_key); 141 - ctx->KEY[1] = u32_in (in_key + 4); 142 - ctx->KEY[2] = u32_in (in_key + 8); 143 - ctx->KEY[3] = u32_in (in_key + 12); 144 145 return 0; 146 ··· 148 149 static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 150 { 151 - 152 u32 y, z, sum = 0; 153 u32 limit = XTEA_DELTA * XTEA_ROUNDS; 154 155 struct xtea_ctx *ctx = ctx_arg; 156 157 - y = u32_in (src); 158 - z = u32_in (src + 4); 159 160 while (sum != limit) { 161 y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); ··· 164 z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); 165 } 166 167 - u32_out (dst, y); 168 - u32_out (dst + 4, z); 169 - 170 } 171 172 static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 173 { 174 - 175 u32 y, z, sum; 176 struct tea_ctx *ctx = ctx_arg; 177 178 - y = u32_in (src); 179 - z = u32_in (src + 4); 180 181 sum = XTEA_DELTA * XTEA_ROUNDS; 182 ··· 186 y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); 187 } 188 189 - u32_out (dst, y); 190 - u32_out (dst + 4, z); 191 - 192 } 193 194 195 static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 196 { 197 - 198 u32 y, z, sum = 0; 199 u32 limit = XTEA_DELTA * XTEA_ROUNDS; 200 201 struct xtea_ctx *ctx = ctx_arg; 202 203 - y = u32_in (src); 204 - z = u32_in (src + 4); 205 206 while (sum != limit) { 207 y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; ··· 209 z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; 210 } 211 212 - u32_out (dst, y); 213 - u32_out (dst + 4, z); 214 - 215 } 216 217 static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 218 { 219 - 220 u32 y, z, sum; 221 struct tea_ctx *ctx = ctx_arg; 222 223 - y = u32_in (src); 224 - z = u32_in (src + 4); 225 226 sum = XTEA_DELTA * XTEA_ROUNDS; 227 ··· 231 y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; 232 } 233 234 - 
u32_out (dst, y); 235 - u32_out (dst + 4, z); 236 - 237 } 238 239 static struct crypto_alg tea_alg = { ··· 240 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 241 .cra_blocksize = TEA_BLOCK_SIZE, 242 .cra_ctxsize = sizeof (struct tea_ctx), 243 .cra_module = THIS_MODULE, 244 .cra_list = LIST_HEAD_INIT(tea_alg.cra_list), 245 .cra_u = { .cipher = { ··· 256 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 257 .cra_blocksize = XTEA_BLOCK_SIZE, 258 .cra_ctxsize = sizeof (struct xtea_ctx), 259 .cra_module = THIS_MODULE, 260 .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), 261 .cra_u = { .cipher = { ··· 272 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 273 .cra_blocksize = XTEA_BLOCK_SIZE, 274 .cra_ctxsize = sizeof (struct xtea_ctx), 275 .cra_module = THIS_MODULE, 276 .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), 277 .cra_u = { .cipher = {
··· 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/mm.h> 25 + #include <asm/byteorder.h> 26 #include <asm/scatterlist.h> 27 #include <linux/crypto.h> 28 + #include <linux/types.h> 29 30 #define TEA_KEY_SIZE 16 31 #define TEA_BLOCK_SIZE 8 ··· 34 #define XTEA_BLOCK_SIZE 8 35 #define XTEA_ROUNDS 32 36 #define XTEA_DELTA 0x9e3779b9 37 38 struct tea_ctx { 39 u32 KEY[4]; ··· 49 static int tea_setkey(void *ctx_arg, const u8 *in_key, 50 unsigned int key_len, u32 *flags) 51 { 52 struct tea_ctx *ctx = ctx_arg; 53 + const __le32 *key = (const __le32 *)in_key; 54 55 if (key_len != 16) 56 { ··· 58 return -EINVAL; 59 } 60 61 + ctx->KEY[0] = le32_to_cpu(key[0]); 62 + ctx->KEY[1] = le32_to_cpu(key[1]); 63 + ctx->KEY[2] = le32_to_cpu(key[2]); 64 + ctx->KEY[3] = le32_to_cpu(key[3]); 65 66 return 0; 67 ··· 73 u32 k0, k1, k2, k3; 74 75 struct tea_ctx *ctx = ctx_arg; 76 + const __le32 *in = (const __le32 *)src; 77 + __le32 *out = (__le32 *)dst; 78 79 + y = le32_to_cpu(in[0]); 80 + z = le32_to_cpu(in[1]); 81 82 k0 = ctx->KEY[0]; 83 k1 = ctx->KEY[1]; ··· 90 z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); 91 } 92 93 + out[0] = cpu_to_le32(y); 94 + out[1] = cpu_to_le32(z); 95 } 96 97 static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 98 { 99 u32 y, z, n, sum; 100 u32 k0, k1, k2, k3; 101 struct tea_ctx *ctx = ctx_arg; 102 + const __le32 *in = (const __le32 *)src; 103 + __le32 *out = (__le32 *)dst; 104 105 + y = le32_to_cpu(in[0]); 106 + z = le32_to_cpu(in[1]); 107 108 k0 = ctx->KEY[0]; 109 k1 = ctx->KEY[1]; ··· 119 sum -= TEA_DELTA; 120 } 121 122 + out[0] = cpu_to_le32(y); 123 + out[1] = cpu_to_le32(z); 124 } 125 126 static int xtea_setkey(void *ctx_arg, const u8 *in_key, 127 unsigned int key_len, u32 *flags) 128 { 129 struct xtea_ctx *ctx = ctx_arg; 130 + const __le32 *key = (const __le32 *)in_key; 131 132 if (key_len != 16) 133 { ··· 136 return -EINVAL; 137 } 138 139 + ctx->KEY[0] = le32_to_cpu(key[0]); 140 + ctx->KEY[1] = le32_to_cpu(key[1]); 141 + ctx->KEY[2] = le32_to_cpu(key[2]); 142 + ctx->KEY[3] = le32_to_cpu(key[3]); 143 144 return 0; 145 ··· 147 148 static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 149 { 150 u32 y, z, sum = 0; 151 u32 limit = XTEA_DELTA * XTEA_ROUNDS; 152 153 struct xtea_ctx *ctx = ctx_arg; 154 + const __le32 *in = (const __le32 *)src; 155 + __le32 *out = (__le32 *)dst; 156 157 + y = le32_to_cpu(in[0]); 158 + z = le32_to_cpu(in[1]); 159 160 while (sum != limit) { 161 y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); ··· 162 z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); 163 } 164 165 + out[0] = cpu_to_le32(y); 166 + out[1] = cpu_to_le32(z); 167 } 168 169 static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 170 { 171 u32 y, z, sum; 172 struct tea_ctx *ctx = ctx_arg; 173 + const __le32 *in = (const __le32 *)src; 174 + __le32 *out = (__le32 *)dst; 175 176 + y = le32_to_cpu(in[0]); 177 + z = le32_to_cpu(in[1]); 178 179 sum = XTEA_DELTA * XTEA_ROUNDS; 180 ··· 184 y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); 185 } 186 187 + out[0] = cpu_to_le32(y); 188 + out[1] = cpu_to_le32(z); 189 } 190 191 192 static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) 193 { 194 u32 y, z, sum = 0; 195 u32 limit = XTEA_DELTA * XTEA_ROUNDS; 196 197 struct xtea_ctx *ctx = ctx_arg; 198 + const __le32 *in = (const __le32 *)src; 199 + __le32 *out = (__le32 *)dst; 200 201 + y = le32_to_cpu(in[0]); 202 + z = le32_to_cpu(in[1]); 203 204 while (sum != limit) { 205 y += (z << 4 ^ z >> 5) + (z ^ sum) + 
ctx->KEY[sum&3]; ··· 207 z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; 208 } 209 210 + out[0] = cpu_to_le32(y); 211 + out[1] = cpu_to_le32(z); 212 } 213 214 static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) 215 { 216 u32 y, z, sum; 217 struct tea_ctx *ctx = ctx_arg; 218 + const __le32 *in = (const __le32 *)src; 219 + __le32 *out = (__le32 *)dst; 220 221 + y = le32_to_cpu(in[0]); 222 + z = le32_to_cpu(in[1]); 223 224 sum = XTEA_DELTA * XTEA_ROUNDS; 225 ··· 229 y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; 230 } 231 232 + out[0] = cpu_to_le32(y); 233 + out[1] = cpu_to_le32(z); 234 } 235 236 static struct crypto_alg tea_alg = { ··· 239 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 240 .cra_blocksize = TEA_BLOCK_SIZE, 241 .cra_ctxsize = sizeof (struct tea_ctx), 242 + .cra_alignmask = 3, 243 .cra_module = THIS_MODULE, 244 .cra_list = LIST_HEAD_INIT(tea_alg.cra_list), 245 .cra_u = { .cipher = { ··· 254 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 255 .cra_blocksize = XTEA_BLOCK_SIZE, 256 .cra_ctxsize = sizeof (struct xtea_ctx), 257 + .cra_alignmask = 3, 258 .cra_module = THIS_MODULE, 259 .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), 260 .cra_u = { .cipher = { ··· 269 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 270 .cra_blocksize = XTEA_BLOCK_SIZE, 271 .cra_ctxsize = sizeof (struct xtea_ctx), 272 + .cra_alignmask = 3, 273 .cra_module = THIS_MODULE, 274 .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), 275 .cra_u = { .cipher = {
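Casting the u8 *src/dst parameters to __le32 * (here, and again in twofish and padlock-aes below) is only safe because these ciphers now also set .cra_alignmask = 3, which asks the crypto layer to present 4-byte-aligned buffers (copying to an aligned buffer if needed). A hedged user-space sketch of the distinction between the always-safe bytewise load and the cast-style load that needs that guarantee:

	#include <stdint.h>
	#include <string.h>
	#include <assert.h>

	/* Always-safe load: assembles the little-endian value byte by byte. */
	static uint32_t load_le32_bytewise(const uint8_t *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}

	int main(void)
	{
		/* the union forces 4-byte alignment, like an alignmask of 3 would */
		union { uint8_t b[8]; uint32_t w[2]; } blk;

		memset(blk.b, 0, sizeof(blk.b));
		blk.b[0] = 0x78; blk.b[1] = 0x56; blk.b[2] = 0x34; blk.b[3] = 0x12;

		/* a (uint32_t *) cast is only legitimate when this holds */
		assert(((uintptr_t)blk.b & 3) == 0);

		/* the bytewise load works regardless of alignment or host endianness */
		assert(load_le32_bytewise(blk.b) == 0x12345678u);
		return 0;
	}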
+16 -46
crypto/tgr192.c
··· 24 #include <linux/init.h> 25 #include <linux/module.h> 26 #include <linux/mm.h> 27 #include <asm/scatterlist.h> 28 #include <linux/crypto.h> 29 30 #define TGR192_DIGEST_SIZE 24 31 #define TGR160_DIGEST_SIZE 20 ··· 469 u64 a, b, c, aa, bb, cc; 470 u64 x[8]; 471 int i; 472 - const u8 *ptr = data; 473 474 - for (i = 0; i < 8; i++, ptr += 8) { 475 - x[i] = (((u64)ptr[7] ) << 56) ^ 476 - (((u64)ptr[6] & 0xffL) << 48) ^ 477 - (((u64)ptr[5] & 0xffL) << 40) ^ 478 - (((u64)ptr[4] & 0xffL) << 32) ^ 479 - (((u64)ptr[3] & 0xffL) << 24) ^ 480 - (((u64)ptr[2] & 0xffL) << 16) ^ 481 - (((u64)ptr[1] & 0xffL) << 8) ^ 482 - (((u64)ptr[0] & 0xffL) ); 483 - } 484 485 /* save */ 486 a = aa = tctx->a; ··· 552 static void tgr192_final(void *ctx, u8 * out) 553 { 554 struct tgr192_ctx *tctx = ctx; 555 u32 t, msb, lsb; 556 - u8 *p; 557 - int i, j; 558 559 tgr192_update(tctx, NULL, 0); /* flush */ ; 560 ··· 589 memset(tctx->hash, 0, 56); /* fill next block with zeroes */ 590 } 591 /* append the 64 bit count */ 592 - tctx->hash[56] = lsb; 593 - tctx->hash[57] = lsb >> 8; 594 - tctx->hash[58] = lsb >> 16; 595 - tctx->hash[59] = lsb >> 24; 596 - tctx->hash[60] = msb; 597 - tctx->hash[61] = msb >> 8; 598 - tctx->hash[62] = msb >> 16; 599 - tctx->hash[63] = msb >> 24; 600 tgr192_transform(tctx, tctx->hash); 601 602 - p = tctx->hash; 603 - *p++ = tctx->a >> 56; *p++ = tctx->a >> 48; *p++ = tctx->a >> 40; 604 - *p++ = tctx->a >> 32; *p++ = tctx->a >> 24; *p++ = tctx->a >> 16; 605 - *p++ = tctx->a >> 8; *p++ = tctx->a;\ 606 - *p++ = tctx->b >> 56; *p++ = tctx->b >> 48; *p++ = tctx->b >> 40; 607 - *p++ = tctx->b >> 32; *p++ = tctx->b >> 24; *p++ = tctx->b >> 16; 608 - *p++ = tctx->b >> 8; *p++ = tctx->b; 609 - *p++ = tctx->c >> 56; *p++ = tctx->c >> 48; *p++ = tctx->c >> 40; 610 - *p++ = tctx->c >> 32; *p++ = tctx->c >> 24; *p++ = tctx->c >> 16; 611 - *p++ = tctx->c >> 8; *p++ = tctx->c; 612 - 613 - 614 - /* unpack the hash */ 615 - j = 7; 616 - for (i = 0; i < 8; i++) { 617 - out[j--] = (tctx->a >> 8 * i) & 0xff; 618 - } 619 - j = 15; 620 - for (i = 0; i < 8; i++) { 621 - out[j--] = (tctx->b >> 8 * i) & 0xff; 622 - } 623 - j = 23; 624 - for (i = 0; i < 8; i++) { 625 - out[j--] = (tctx->c >> 8 * i) & 0xff; 626 - } 627 } 628 629 static void tgr160_final(void *ctx, u8 * out)
··· 24 #include <linux/init.h> 25 #include <linux/module.h> 26 #include <linux/mm.h> 27 + #include <asm/byteorder.h> 28 #include <asm/scatterlist.h> 29 #include <linux/crypto.h> 30 + #include <linux/types.h> 31 32 #define TGR192_DIGEST_SIZE 24 33 #define TGR160_DIGEST_SIZE 20 ··· 467 u64 a, b, c, aa, bb, cc; 468 u64 x[8]; 469 int i; 470 + const __le64 *ptr = (const __le64 *)data; 471 472 + for (i = 0; i < 8; i++) 473 + x[i] = le64_to_cpu(ptr[i]); 474 475 /* save */ 476 a = aa = tctx->a; ··· 558 static void tgr192_final(void *ctx, u8 * out) 559 { 560 struct tgr192_ctx *tctx = ctx; 561 + __be64 *dst = (__be64 *)out; 562 + __be64 *be64p; 563 + __le32 *le32p; 564 u32 t, msb, lsb; 565 566 tgr192_update(tctx, NULL, 0); /* flush */ ; 567 ··· 594 memset(tctx->hash, 0, 56); /* fill next block with zeroes */ 595 } 596 /* append the 64 bit count */ 597 + le32p = (__le32 *)&tctx->hash[56]; 598 + le32p[0] = cpu_to_le32(lsb); 599 + le32p[1] = cpu_to_le32(msb); 600 + 601 tgr192_transform(tctx, tctx->hash); 602 603 + be64p = (__be64 *)tctx->hash; 604 + dst[0] = be64p[0] = cpu_to_be64(tctx->a); 605 + dst[1] = be64p[1] = cpu_to_be64(tctx->b); 606 + dst[2] = be64p[2] = cpu_to_be64(tctx->c); 607 } 608 609 static void tgr160_final(void *ctx, u8 * out)
+9 -4
crypto/twofish.c
··· 37 * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the 38 * Third Edition. 39 */ 40 #include <linux/module.h> 41 #include <linux/init.h> 42 #include <linux/types.h> ··· 623 * whitening subkey number m. */ 624 625 #define INPACK(n, x, m) \ 626 - x = in[4 * (n)] ^ (in[4 * (n) + 1] << 8) \ 627 - ^ (in[4 * (n) + 2] << 16) ^ (in[4 * (n) + 3] << 24) ^ ctx->w[m] 628 629 #define OUTUNPACK(n, x, m) \ 630 x ^= ctx->w[m]; \ 631 - out[4 * (n)] = x; out[4 * (n) + 1] = x >> 8; \ 632 - out[4 * (n) + 2] = x >> 16; out[4 * (n) + 3] = x >> 24 633 634 #define TF_MIN_KEY_SIZE 16 635 #define TF_MAX_KEY_SIZE 32 ··· 804 static void twofish_encrypt(void *cx, u8 *out, const u8 *in) 805 { 806 struct twofish_ctx *ctx = cx; 807 808 /* The four 32-bit chunks of the text. */ 809 u32 a, b, c, d; ··· 841 static void twofish_decrypt(void *cx, u8 *out, const u8 *in) 842 { 843 struct twofish_ctx *ctx = cx; 844 845 /* The four 32-bit chunks of the text. */ 846 u32 a, b, c, d; ··· 879 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 880 .cra_blocksize = TF_BLOCK_SIZE, 881 .cra_ctxsize = sizeof(struct twofish_ctx), 882 .cra_module = THIS_MODULE, 883 .cra_list = LIST_HEAD_INIT(alg.cra_list), 884 .cra_u = { .cipher = {
··· 37 * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the 38 * Third Edition. 39 */ 40 + 41 + #include <asm/byteorder.h> 42 #include <linux/module.h> 43 #include <linux/init.h> 44 #include <linux/types.h> ··· 621 * whitening subkey number m. */ 622 623 #define INPACK(n, x, m) \ 624 + x = le32_to_cpu(src[n]) ^ ctx->w[m] 625 626 #define OUTUNPACK(n, x, m) \ 627 x ^= ctx->w[m]; \ 628 + dst[n] = cpu_to_le32(x) 629 630 #define TF_MIN_KEY_SIZE 16 631 #define TF_MAX_KEY_SIZE 32 ··· 804 static void twofish_encrypt(void *cx, u8 *out, const u8 *in) 805 { 806 struct twofish_ctx *ctx = cx; 807 + const __le32 *src = (const __le32 *)in; 808 + __le32 *dst = (__le32 *)out; 809 810 /* The four 32-bit chunks of the text. */ 811 u32 a, b, c, d; ··· 839 static void twofish_decrypt(void *cx, u8 *out, const u8 *in) 840 { 841 struct twofish_ctx *ctx = cx; 842 + const __le32 *src = (const __le32 *)in; 843 + __le32 *dst = (__le32 *)out; 844 845 /* The four 32-bit chunks of the text. */ 846 u32 a, b, c, d; ··· 875 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 876 .cra_blocksize = TF_BLOCK_SIZE, 877 .cra_ctxsize = sizeof(struct twofish_ctx), 878 + .cra_alignmask = 3, 879 .cra_module = THIS_MODULE, 880 .cra_list = LIST_HEAD_INIT(alg.cra_list), 881 .cra_u = { .cipher = {
+8 -24
crypto/wp512.c
··· 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/mm.h> 25 #include <asm/scatterlist.h> 26 #include <linux/crypto.h> 27 28 #define WP512_DIGEST_SIZE 64 29 #define WP384_DIGEST_SIZE 48 ··· 780 u64 block[8]; /* mu(buffer) */ 781 u64 state[8]; /* the cipher state */ 782 u64 L[8]; 783 - u8 *buffer = wctx->buffer; 784 785 - for (i = 0; i < 8; i++, buffer += 8) { 786 - block[i] = 787 - (((u64)buffer[0] ) << 56) ^ 788 - (((u64)buffer[1] & 0xffL) << 48) ^ 789 - (((u64)buffer[2] & 0xffL) << 40) ^ 790 - (((u64)buffer[3] & 0xffL) << 32) ^ 791 - (((u64)buffer[4] & 0xffL) << 24) ^ 792 - (((u64)buffer[5] & 0xffL) << 16) ^ 793 - (((u64)buffer[6] & 0xffL) << 8) ^ 794 - (((u64)buffer[7] & 0xffL) ); 795 - } 796 797 state[0] = block[0] ^ (K[0] = wctx->hash[0]); 798 state[1] = block[1] ^ (K[1] = wctx->hash[1]); ··· 1062 u8 *bitLength = wctx->bitLength; 1063 int bufferBits = wctx->bufferBits; 1064 int bufferPos = wctx->bufferPos; 1065 - u8 *digest = out; 1066 1067 buffer[bufferPos] |= 0x80U >> (bufferBits & 7); 1068 bufferPos++; ··· 1081 memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], 1082 bitLength, WP512_LENGTHBYTES); 1083 wp512_process_buffer(wctx); 1084 - for (i = 0; i < WP512_DIGEST_SIZE/8; i++) { 1085 - digest[0] = (u8)(wctx->hash[i] >> 56); 1086 - digest[1] = (u8)(wctx->hash[i] >> 48); 1087 - digest[2] = (u8)(wctx->hash[i] >> 40); 1088 - digest[3] = (u8)(wctx->hash[i] >> 32); 1089 - digest[4] = (u8)(wctx->hash[i] >> 24); 1090 - digest[5] = (u8)(wctx->hash[i] >> 16); 1091 - digest[6] = (u8)(wctx->hash[i] >> 8); 1092 - digest[7] = (u8)(wctx->hash[i] ); 1093 - digest += 8; 1094 - } 1095 wctx->bufferBits = bufferBits; 1096 wctx->bufferPos = bufferPos; 1097 }
··· 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/mm.h> 25 + #include <asm/byteorder.h> 26 #include <asm/scatterlist.h> 27 #include <linux/crypto.h> 28 + #include <linux/types.h> 29 30 #define WP512_DIGEST_SIZE 64 31 #define WP384_DIGEST_SIZE 48 ··· 778 u64 block[8]; /* mu(buffer) */ 779 u64 state[8]; /* the cipher state */ 780 u64 L[8]; 781 + const __be64 *buffer = (const __be64 *)wctx->buffer; 782 783 + for (i = 0; i < 8; i++) 784 + block[i] = be64_to_cpu(buffer[i]); 785 786 state[0] = block[0] ^ (K[0] = wctx->hash[0]); 787 state[1] = block[1] ^ (K[1] = wctx->hash[1]); ··· 1069 u8 *bitLength = wctx->bitLength; 1070 int bufferBits = wctx->bufferBits; 1071 int bufferPos = wctx->bufferPos; 1072 + __be64 *digest = (__be64 *)out; 1073 1074 buffer[bufferPos] |= 0x80U >> (bufferBits & 7); 1075 bufferPos++; ··· 1088 memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES], 1089 bitLength, WP512_LENGTHBYTES); 1090 wp512_process_buffer(wctx); 1091 + for (i = 0; i < WP512_DIGEST_SIZE/8; i++) 1092 + digest[i] = cpu_to_be64(wctx->hash[i]); 1093 wctx->bufferBits = bufferBits; 1094 wctx->bufferPos = bufferPos; 1095 }
+13 -13
drivers/crypto/padlock-aes.c
··· 99 return x >> (n << 3); 100 } 101 102 - #define uint32_t_in(x) le32_to_cpu(*(const uint32_t *)(x)) 103 - #define uint32_t_out(to, from) (*(uint32_t *)(to) = cpu_to_le32(from)) 104 - 105 #define E_KEY ctx->E 106 #define D_KEY ctx->D 107 ··· 291 aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) 292 { 293 struct aes_ctx *ctx = aes_ctx(ctx_arg); 294 uint32_t i, t, u, v, w; 295 uint32_t P[AES_EXTENDED_KEY_SIZE]; 296 uint32_t rounds; ··· 311 ctx->E = ctx->e_data; 312 ctx->D = ctx->e_data; 313 314 - E_KEY[0] = uint32_t_in (in_key); 315 - E_KEY[1] = uint32_t_in (in_key + 4); 316 - E_KEY[2] = uint32_t_in (in_key + 8); 317 - E_KEY[3] = uint32_t_in (in_key + 12); 318 319 /* Prepare control words. */ 320 memset(&ctx->cword, 0, sizeof(ctx->cword)); ··· 341 break; 342 343 case 24: 344 - E_KEY[4] = uint32_t_in (in_key + 16); 345 - t = E_KEY[5] = uint32_t_in (in_key + 20); 346 for (i = 0; i < 8; ++i) 347 loop6 (i); 348 break; 349 350 case 32: 351 - E_KEY[4] = uint32_t_in (in_key + 16); 352 - E_KEY[5] = uint32_t_in (in_key + 20); 353 - E_KEY[6] = uint32_t_in (in_key + 24); 354 - t = E_KEY[7] = uint32_t_in (in_key + 28); 355 for (i = 0; i < 7; ++i) 356 loop8 (i); 357 break; ··· 466 467 static struct crypto_alg aes_alg = { 468 .cra_name = "aes", 469 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 470 .cra_blocksize = AES_BLOCK_SIZE, 471 .cra_ctxsize = sizeof(struct aes_ctx),
··· 99 return x >> (n << 3); 100 } 101 102 #define E_KEY ctx->E 103 #define D_KEY ctx->D 104 ··· 294 aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) 295 { 296 struct aes_ctx *ctx = aes_ctx(ctx_arg); 297 + const __le32 *key = (const __le32 *)in_key; 298 uint32_t i, t, u, v, w; 299 uint32_t P[AES_EXTENDED_KEY_SIZE]; 300 uint32_t rounds; ··· 313 ctx->E = ctx->e_data; 314 ctx->D = ctx->e_data; 315 316 + E_KEY[0] = le32_to_cpu(key[0]); 317 + E_KEY[1] = le32_to_cpu(key[1]); 318 + E_KEY[2] = le32_to_cpu(key[2]); 319 + E_KEY[3] = le32_to_cpu(key[3]); 320 321 /* Prepare control words. */ 322 memset(&ctx->cword, 0, sizeof(ctx->cword)); ··· 343 break; 344 345 case 24: 346 + E_KEY[4] = le32_to_cpu(key[4]); 347 + t = E_KEY[5] = le32_to_cpu(key[5]); 348 for (i = 0; i < 8; ++i) 349 loop6 (i); 350 break; 351 352 case 32: 353 + E_KEY[4] = le32_to_cpu(key[4]); 354 + E_KEY[5] = le32_to_cpu(key[5]); 355 + E_KEY[6] = le32_to_cpu(key[6]); 356 + t = E_KEY[7] = le32_to_cpu(key[7]); 357 for (i = 0; i < 7; ++i) 358 loop8 (i); 359 break; ··· 468 469 static struct crypto_alg aes_alg = { 470 .cra_name = "aes", 471 + .cra_driver_name = "aes-padlock", 472 + .cra_priority = 300, 473 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 474 .cra_blocksize = AES_BLOCK_SIZE, 475 .cra_ctxsize = sizeof(struct aes_ctx),
+1 -1
drivers/crypto/padlock.h
··· 17 18 /* Control word. */ 19 struct cword { 20 - int __attribute__ ((__packed__)) 21 rounds:4, 22 algo:3, 23 keygen:1,
··· 17 18 /* Control word. */ 19 struct cword { 20 + unsigned int __attribute__ ((__packed__)) 21 rounds:4, 22 algo:3, 23 keygen:1,
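The padlock.h change matters because a plain int bitfield is signed (or at best implementation-defined), and a 4-bit signed field can only hold -8..7, while AES-256 needs rounds = 14. A small illustrative sketch of the failure mode on a typical two's-complement compiler (the field names mirror the cword layout, but this is not the driver code):

	#include <stdio.h>

	struct cword_signed   { int          rounds:4, algo:3; };
	struct cword_unsigned { unsigned int rounds:4, algo:3; };

	int main(void)
	{
		struct cword_signed   s = { .rounds = 14 };	/* AES-256 round count */
		struct cword_unsigned u = { .rounds = 14 };

		/* On common compilers the signed 4-bit field reads back as -2,
		 * so the value handed to the hardware would be wrong. */
		printf("signed: %d, unsigned: %d\n", s.rounds, u.rounds);
		return 0;
	}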
+13
drivers/net/Kconfig
··· 27 # that for each of the symbols. 28 if NETDEVICES 29 30 config DUMMY 31 tristate "Dummy net driver support" 32 ---help---
··· 27 # that for each of the symbols. 28 if NETDEVICES 29 30 + config IFB 31 + tristate "Intermediate Functional Block support" 32 + depends on NET_CLS_ACT 33 + ---help--- 34 + This is an intermediate driver that allows sharing of 35 + resources. 36 + To compile this driver as a module, choose M here: the module 37 + will be called ifb. If you want to use more than one ifb 38 + device at a time, you need to compile this driver as a module. 39 + Instead of 'ifb', the devices will then be called 'ifb0', 40 + 'ifb1' etc. 41 + Look at the iproute2 documentation directory for usage etc. 42 + 43 config DUMMY 44 tristate "Dummy net driver support" 45 ---help---
+1
drivers/net/Makefile
··· 125 endif 126 127 obj-$(CONFIG_DUMMY) += dummy.o 128 obj-$(CONFIG_DE600) += de600.o 129 obj-$(CONFIG_DE620) += de620.o 130 obj-$(CONFIG_LANCE) += lance.o
··· 125 endif 126 127 obj-$(CONFIG_DUMMY) += dummy.o 128 + obj-$(CONFIG_IFB) += ifb.o 129 obj-$(CONFIG_DE600) += de600.o 130 obj-$(CONFIG_DE620) += de620.o 131 obj-$(CONFIG_LANCE) += lance.o
+1 -1
drivers/net/hamradio/mkiss.c
··· 515 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 516 } 517 } 518 519 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 520 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); ··· 525 ax->dev->trans_start = jiffies; 526 ax->xleft = count - actual; 527 ax->xhead = ax->xbuff + actual; 528 - spin_unlock_bh(&ax->buflock); 529 } 530 531 /* Encapsulate an AX.25 packet and kick it into a TTY queue. */
··· 515 count = kiss_esc(p, (unsigned char *)ax->xbuff, len); 516 } 517 } 518 + spin_unlock_bh(&ax->buflock); 519 520 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 521 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); ··· 524 ax->dev->trans_start = jiffies; 525 ax->xleft = count - actual; 526 ax->xhead = ax->xbuff + actual; 527 } 528 529 /* Encapsulate an AX.25 packet and kick it into a TTY queue. */
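The mkiss fix is purely about lock scope: build the escaped frame under ax->buflock, then drop the lock before calling into the tty driver, which may take its own locks or run for a long time. A generic pthreads sketch of the same "shrink the critical section" pattern (hypothetical buffer and function names, not the mkiss code):

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
	static char shared_buf[64];

	static void slow_write(const char *buf, size_t len)
	{
		fwrite(buf, 1, len, stdout);	/* stands in for the tty driver write */
	}

	static void send_frame(const char *payload, size_t len)
	{
		char local[sizeof(shared_buf)];

		pthread_mutex_lock(&buf_lock);
		memcpy(shared_buf, payload, len);	/* encode/escape under the lock */
		memcpy(local, shared_buf, len);
		pthread_mutex_unlock(&buf_lock);	/* drop the lock first ... */

		slow_write(local, len);			/* ... then make the slow call */
	}

	int main(void)
	{
		send_frame("hello\n", 6);
		return 0;
	}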
+294
drivers/net/ifb.c
···
··· 1 + /* drivers/net/ifb.c: 2 + 3 + The purpose of this driver is to provide a device that allows 4 + for sharing of resources: 5 + 6 + 1) qdiscs/policies that are per device as opposed to system wide. 7 + ifb allows for a device which can be redirected to thus providing 8 + an impression of sharing. 9 + 10 + 2) Allows for queueing incoming traffic for shaping instead of 11 + dropping. 12 + 13 + The original concept is based on what is known as the IMQ 14 + driver initially written by Martin Devera, later rewritten 15 + by Patrick McHardy and then maintained by Andre Correa. 16 + 17 + You need the tc action mirror or redirect to feed this device 18 + packets. 19 + 20 + This program is free software; you can redistribute it and/or 21 + modify it under the terms of the GNU General Public License 22 + as published by the Free Software Foundation; either version 23 + 2 of the License, or (at your option) any later version. 24 + 25 + Authors: Jamal Hadi Salim (2005) 26 + 27 + */ 28 + 29 + 30 + #include <linux/config.h> 31 + #include <linux/module.h> 32 + #include <linux/kernel.h> 33 + #include <linux/netdevice.h> 34 + #include <linux/etherdevice.h> 35 + #include <linux/init.h> 36 + #include <linux/moduleparam.h> 37 + #include <net/pkt_sched.h> 38 + 39 + #define TX_TIMEOUT (2*HZ) 40 + 41 + #define TX_Q_LIMIT 32 42 + struct ifb_private { 43 + struct net_device_stats stats; 44 + struct tasklet_struct ifb_tasklet; 45 + int tasklet_pending; 46 + /* mostly debug stats leave in for now */ 47 + unsigned long st_task_enter; /* tasklet entered */ 48 + unsigned long st_txq_refl_try; /* transmit queue refill attempt */ 49 + unsigned long st_rxq_enter; /* receive queue entered */ 50 + unsigned long st_rx2tx_tran; /* receive to trasmit transfers */ 51 + unsigned long st_rxq_notenter; /*receiveQ not entered, resched */ 52 + unsigned long st_rx_frm_egr; /* received from egress path */ 53 + unsigned long st_rx_frm_ing; /* received from ingress path */ 54 + unsigned long st_rxq_check; 55 + unsigned long st_rxq_rsch; 56 + struct sk_buff_head rq; 57 + struct sk_buff_head tq; 58 + }; 59 + 60 + static int numifbs = 1; 61 + 62 + static void ri_tasklet(unsigned long dev); 63 + static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); 64 + static struct net_device_stats *ifb_get_stats(struct net_device *dev); 65 + static int ifb_open(struct net_device *dev); 66 + static int ifb_close(struct net_device *dev); 67 + 68 + static void ri_tasklet(unsigned long dev) 69 + { 70 + 71 + struct net_device *_dev = (struct net_device *)dev; 72 + struct ifb_private *dp = netdev_priv(_dev); 73 + struct net_device_stats *stats = &dp->stats; 74 + struct sk_buff *skb; 75 + 76 + dp->st_task_enter++; 77 + if ((skb = skb_peek(&dp->tq)) == NULL) { 78 + dp->st_txq_refl_try++; 79 + if (spin_trylock(&_dev->xmit_lock)) { 80 + dp->st_rxq_enter++; 81 + while ((skb = skb_dequeue(&dp->rq)) != NULL) { 82 + skb_queue_tail(&dp->tq, skb); 83 + dp->st_rx2tx_tran++; 84 + } 85 + spin_unlock(&_dev->xmit_lock); 86 + } else { 87 + /* reschedule */ 88 + dp->st_rxq_notenter++; 89 + goto resched; 90 + } 91 + } 92 + 93 + while ((skb = skb_dequeue(&dp->tq)) != NULL) { 94 + u32 from = G_TC_FROM(skb->tc_verd); 95 + 96 + skb->tc_verd = 0; 97 + skb->tc_verd = SET_TC_NCLS(skb->tc_verd); 98 + stats->tx_packets++; 99 + stats->tx_bytes +=skb->len; 100 + if (from & AT_EGRESS) { 101 + dp->st_rx_frm_egr++; 102 + dev_queue_xmit(skb); 103 + } else if (from & AT_INGRESS) { 104 + 105 + dp->st_rx_frm_ing++; 106 + netif_rx(skb); 107 + } else { 108 + dev_kfree_skb(skb); 
109 + stats->tx_dropped++; 110 + } 111 + } 112 + 113 + if (spin_trylock(&_dev->xmit_lock)) { 114 + dp->st_rxq_check++; 115 + if ((skb = skb_peek(&dp->rq)) == NULL) { 116 + dp->tasklet_pending = 0; 117 + if (netif_queue_stopped(_dev)) 118 + netif_wake_queue(_dev); 119 + } else { 120 + dp->st_rxq_rsch++; 121 + spin_unlock(&_dev->xmit_lock); 122 + goto resched; 123 + } 124 + spin_unlock(&_dev->xmit_lock); 125 + } else { 126 + resched: 127 + dp->tasklet_pending = 1; 128 + tasklet_schedule(&dp->ifb_tasklet); 129 + } 130 + 131 + } 132 + 133 + static void __init ifb_setup(struct net_device *dev) 134 + { 135 + /* Initialize the device structure. */ 136 + dev->get_stats = ifb_get_stats; 137 + dev->hard_start_xmit = ifb_xmit; 138 + dev->open = &ifb_open; 139 + dev->stop = &ifb_close; 140 + 141 + /* Fill in device structure with ethernet-generic values. */ 142 + ether_setup(dev); 143 + dev->tx_queue_len = TX_Q_LIMIT; 144 + dev->change_mtu = NULL; 145 + dev->flags |= IFF_NOARP; 146 + dev->flags &= ~IFF_MULTICAST; 147 + SET_MODULE_OWNER(dev); 148 + random_ether_addr(dev->dev_addr); 149 + } 150 + 151 + static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) 152 + { 153 + struct ifb_private *dp = netdev_priv(dev); 154 + struct net_device_stats *stats = &dp->stats; 155 + int ret = 0; 156 + u32 from = G_TC_FROM(skb->tc_verd); 157 + 158 + stats->tx_packets++; 159 + stats->tx_bytes+=skb->len; 160 + 161 + if (!from || !skb->input_dev) { 162 + dropped: 163 + dev_kfree_skb(skb); 164 + stats->rx_dropped++; 165 + return ret; 166 + } else { 167 + /* 168 + * note we could be going 169 + * ingress -> egress or 170 + * egress -> ingress 171 + */ 172 + skb->dev = skb->input_dev; 173 + skb->input_dev = dev; 174 + if (from & AT_INGRESS) { 175 + skb_pull(skb, skb->dev->hard_header_len); 176 + } else { 177 + if (!(from & AT_EGRESS)) { 178 + goto dropped; 179 + } 180 + } 181 + } 182 + 183 + if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { 184 + netif_stop_queue(dev); 185 + } 186 + 187 + dev->trans_start = jiffies; 188 + skb_queue_tail(&dp->rq, skb); 189 + if (!dp->tasklet_pending) { 190 + dp->tasklet_pending = 1; 191 + tasklet_schedule(&dp->ifb_tasklet); 192 + } 193 + 194 + return ret; 195 + } 196 + 197 + static struct net_device_stats *ifb_get_stats(struct net_device *dev) 198 + { 199 + struct ifb_private *dp = netdev_priv(dev); 200 + struct net_device_stats *stats = &dp->stats; 201 + 202 + pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n", 203 + dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter, 204 + dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr, 205 + dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch ); 206 + 207 + return stats; 208 + } 209 + 210 + static struct net_device **ifbs; 211 + 212 + /* Number of ifb devices to be set up by this module.
*/ 213 + module_param(numifbs, int, 0); 214 + MODULE_PARM_DESC(numifbs, "Number of ifb devices"); 215 + 216 + static int ifb_close(struct net_device *dev) 217 + { 218 + struct ifb_private *dp = netdev_priv(dev); 219 + 220 + tasklet_kill(&dp->ifb_tasklet); 221 + netif_stop_queue(dev); 222 + skb_queue_purge(&dp->rq); 223 + skb_queue_purge(&dp->tq); 224 + return 0; 225 + } 226 + 227 + static int ifb_open(struct net_device *dev) 228 + { 229 + struct ifb_private *dp = netdev_priv(dev); 230 + 231 + tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); 232 + skb_queue_head_init(&dp->rq); 233 + skb_queue_head_init(&dp->tq); 234 + netif_start_queue(dev); 235 + 236 + return 0; 237 + } 238 + 239 + static int __init ifb_init_one(int index) 240 + { 241 + struct net_device *dev_ifb; 242 + int err; 243 + 244 + dev_ifb = alloc_netdev(sizeof(struct ifb_private), 245 + "ifb%d", ifb_setup); 246 + 247 + if (!dev_ifb) 248 + return -ENOMEM; 249 + 250 + if ((err = register_netdev(dev_ifb))) { 251 + free_netdev(dev_ifb); 252 + dev_ifb = NULL; 253 + } else { 254 + ifbs[index] = dev_ifb; 255 + } 256 + 257 + return err; 258 + } 259 + 260 + static void ifb_free_one(int index) 261 + { 262 + unregister_netdev(ifbs[index]); 263 + free_netdev(ifbs[index]); 264 + } 265 + 266 + static int __init ifb_init_module(void) 267 + { 268 + int i, err = 0; 269 + ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL); 270 + if (!ifbs) 271 + return -ENOMEM; 272 + for (i = 0; i < numifbs && !err; i++) 273 + err = ifb_init_one(i); 274 + if (err) { 275 + while (--i >= 0) 276 + ifb_free_one(i); 277 + } 278 + 279 + return err; 280 + } 281 + 282 + static void __exit ifb_cleanup_module(void) 283 + { 284 + int i; 285 + 286 + for (i = 0; i < numifbs; i++) 287 + ifb_free_one(i); 288 + kfree(ifbs); 289 + } 290 + 291 + module_init(ifb_init_module); 292 + module_exit(ifb_cleanup_module); 293 + MODULE_LICENSE("GPL"); 294 + MODULE_AUTHOR("Jamal Hadi Salim");
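ifb_init_module() above uses the usual partial-failure unwind: create devices 0..i-1 in order, and if device i fails, tear down everything already created with while (--i >= 0). A hedged user-space analogue of that error-handling shape (hypothetical resource type, not the netdev code):

	#include <stdlib.h>

	struct res { int id; };

	static struct res *res_create(int i)
	{
		struct res *r = malloc(sizeof(*r));
		if (r)
			r->id = i;
		return r;			/* NULL on failure */
	}

	static int create_all(struct res **v, int n)
	{
		int i, err = 0;

		for (i = 0; i < n && !err; i++) {
			v[i] = res_create(i);
			if (!v[i])
				err = -1;
		}
		if (err) {
			/* unwind; the failed slot is NULL, so free() is a no-op there */
			while (--i >= 0)
				free(v[i]);
		}
		return err;
	}

	int main(void)
	{
		struct res *v[4];
		return create_all(v, 4) ? 1 : 0;
	}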
+5
include/linux/crypto.h
··· 3 * 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 * Copyright (c) 2002 David S. Miller (davem@redhat.com) 6 * 7 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> 8 * and Nettle, by Niels Möller. ··· 127 unsigned int cra_blocksize; 128 unsigned int cra_ctxsize; 129 unsigned int cra_alignmask; 130 const char cra_name[CRYPTO_MAX_ALG_NAME]; 131 132 union { 133 struct cipher_alg cipher;
··· 3 * 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 * Copyright (c) 2002 David S. Miller (davem@redhat.com) 6 + * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> 7 * 8 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> 9 * and Nettle, by Niels Möller. ··· 126 unsigned int cra_blocksize; 127 unsigned int cra_ctxsize; 128 unsigned int cra_alignmask; 129 + 130 + int cra_priority; 131 + 132 const char cra_name[CRYPTO_MAX_ALG_NAME]; 133 + const char cra_driver_name[CRYPTO_MAX_ALG_NAME]; 134 135 union { 136 struct cipher_alg cipher;
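cra_priority and cra_driver_name exist so that several implementations can register under the same cra_name ("aes") and the most preferred one is chosen; the padlock-aes hunk above registers at priority 300 to outrank the portable C implementation. A toy sketch of the selection idea only (the numbers and the "aes-generic" name are illustrative; the real lookup lives in the crypto API, not in this header):

	#include <stdio.h>

	struct alg {
		const char *name;		/* user-visible name, e.g. "aes"      */
		const char *driver_name;	/* implementation, e.g. "aes-padlock" */
		int priority;
	};

	static const struct alg *pick(const struct alg *a, const struct alg *b)
	{
		return (b->priority > a->priority) ? b : a;	/* higher priority wins */
	}

	int main(void)
	{
		struct alg generic = { "aes", "aes-generic", 100 };
		struct alg padlock = { "aes", "aes-padlock", 300 };

		printf("selected: %s\n", pick(&generic, &padlock)->driver_name);
		return 0;
	}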
+1 -1
include/net/act_api.h
··· 63 __u32 type; /* TBD to match kind */ 64 __u32 capab; /* capabilities includes 4 bit version */ 65 struct module *owner; 66 - int (*act)(struct sk_buff **, struct tc_action *, struct tcf_result *); 67 int (*get_stats)(struct sk_buff *, struct tc_action *); 68 int (*dump)(struct sk_buff *, struct tc_action *,int , int); 69 int (*cleanup)(struct tc_action *, int bind);
··· 63 __u32 type; /* TBD to match kind */ 64 __u32 capab; /* capabilities includes 4 bit version */ 65 struct module *owner; 66 + int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *); 67 int (*get_stats)(struct sk_buff *, struct tc_action *); 68 int (*dump)(struct sk_buff *, struct tc_action *,int , int); 69 int (*cleanup)(struct tc_action *, int bind);
+12 -11
include/net/pkt_sched.h
··· 1 #ifndef __NET_PKT_SCHED_H 2 #define __NET_PKT_SCHED_H 3 4 #include <net/sch_generic.h> 5 6 struct qdisc_walker ··· 60 typedef long psched_tdiff_t; 61 62 #define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp)) 63 - #define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ)) 64 - #define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ)) 65 66 #else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ 67 ··· 124 default: \ 125 __delta = 0; \ 126 case 2: \ 127 - __delta += 1000000; \ 128 case 1: \ 129 - __delta += 1000000; \ 130 } \ 131 } \ 132 __delta; \ ··· 137 { 138 int delta; 139 140 - if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1) 141 return bound; 142 - delta = delta_sec * 1000000; 143 if (delta > bound || delta < 0) 144 delta = bound; 145 return delta; ··· 153 default: \ 154 __delta = psched_tod_diff(__delta_sec, bound); break; \ 155 case 2: \ 156 - __delta += 1000000; \ 157 case 1: \ 158 - __delta += 1000000; \ 159 case 0: \ 160 if (__delta > bound || __delta < 0) \ 161 __delta = bound; \ ··· 171 ({ \ 172 int __delta = (tv).tv_usec + (delta); \ 173 (tv_res).tv_sec = (tv).tv_sec; \ 174 - if (__delta > 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \ 175 (tv_res).tv_usec = __delta; \ 176 }) 177 178 #define PSCHED_TADD(tv, delta) \ 179 ({ \ 180 (tv).tv_usec += (delta); \ 181 - if ((tv).tv_usec > 1000000) { (tv).tv_sec++; \ 182 - (tv).tv_usec -= 1000000; } \ 183 }) 184 185 /* Set/check that time is in the "past perfect";
··· 1 #ifndef __NET_PKT_SCHED_H 2 #define __NET_PKT_SCHED_H 3 4 + #include <linux/jiffies.h> 5 #include <net/sch_generic.h> 6 7 struct qdisc_walker ··· 59 typedef long psched_tdiff_t; 60 61 #define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp)) 62 + #define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs) 63 + #define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay) 64 65 #else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */ 66 ··· 123 default: \ 124 __delta = 0; \ 125 case 2: \ 126 + __delta += USEC_PER_SEC; \ 127 case 1: \ 128 + __delta += USEC_PER_SEC; \ 129 } \ 130 } \ 131 __delta; \ ··· 136 { 137 int delta; 138 139 + if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1) 140 return bound; 141 + delta = delta_sec * USEC_PER_SEC; 142 if (delta > bound || delta < 0) 143 delta = bound; 144 return delta; ··· 152 default: \ 153 __delta = psched_tod_diff(__delta_sec, bound); break; \ 154 case 2: \ 155 + __delta += USEC_PER_SEC; \ 156 case 1: \ 157 + __delta += USEC_PER_SEC; \ 158 case 0: \ 159 if (__delta > bound || __delta < 0) \ 160 __delta = bound; \ ··· 170 ({ \ 171 int __delta = (tv).tv_usec + (delta); \ 172 (tv_res).tv_sec = (tv).tv_sec; \ 173 + if (__delta > USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ 174 (tv_res).tv_usec = __delta; \ 175 }) 176 177 #define PSCHED_TADD(tv, delta) \ 178 ({ \ 179 (tv).tv_usec += (delta); \ 180 + if ((tv).tv_usec > USEC_PER_SEC) { (tv).tv_sec++; \ 181 + (tv).tv_usec -= USEC_PER_SEC; } \ 182 }) 183 184 /* Set/check that time is in the "past perfect";
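PSCHED_US2JIFFIE previously open-coded a round-up division by (1000000/HZ); usecs_to_jiffies() keeps the same round-up behaviour (assuming, as the old macro did, that HZ divides USEC_PER_SEC evenly). A quick user-space check of the arithmetic, with HZ = 250 assumed purely for illustration:

	#include <assert.h>

	#define HZ           250		/* assumed value for this illustration */
	#define USEC_PER_SEC 1000000L

	/* the old open-coded macro: round *up* to whole jiffies */
	#define US2JIFFIE(usecs) (((usecs) + (USEC_PER_SEC/HZ - 1)) / (USEC_PER_SEC/HZ))

	int main(void)
	{
		assert(US2JIFFIE(0) == 0);
		assert(US2JIFFIE(1) == 1);	/* any non-zero delay costs a jiffy */
		assert(US2JIFFIE(4000) == 1);	/* exactly one tick at HZ=250       */
		assert(US2JIFFIE(4001) == 2);	/* partial ticks round up           */
		return 0;
	}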
+3 -6
net/core/dev.c
··· 1092 goto out; 1093 } 1094 1095 - if (offset > (int)skb->len) 1096 - BUG(); 1097 csum = skb_checksum(skb, offset, skb->len-offset, 0); 1098 1099 offset = skb->tail - skb->h.raw; 1100 - if (offset <= 0) 1101 - BUG(); 1102 - if (skb->csum + 2 > offset) 1103 - BUG(); 1104 1105 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); 1106 skb->ip_summed = CHECKSUM_NONE;
··· 1092 goto out; 1093 } 1094 1095 + BUG_ON(offset > (int)skb->len); 1096 csum = skb_checksum(skb, offset, skb->len-offset, 0); 1097 1098 offset = skb->tail - skb->h.raw; 1099 + BUG_ON(offset <= 0); 1100 + BUG_ON(skb->csum + 2 > offset); 1101 1102 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); 1103 skb->ip_summed = CHECKSUM_NONE;
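This hunk, like the skbuff.c, icmp.c and inetpeer.c hunks below, is a mechanical conversion of `if (cond) BUG();` into `BUG_ON(cond)`; behaviour is unchanged. A user-space stand-in showing the equivalence (the kernel's real BUG_ON additionally uses unlikely() and an architecture-specific trap, so this is only a sketch):

	#include <stdio.h>
	#include <stdlib.h>

	/* user-space stand-ins for the kernel macros */
	#define BUG()        do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
	#define BUG_ON(cond) do { if (cond) BUG(); } while (0)

	int main(void)
	{
		int offset = 10, len = 20;

		/* old style */
		if (offset > len)
			BUG();

		/* new style, same effect */
		BUG_ON(offset > len);

		puts("no bug triggered");
		return 0;
	}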
+5 -10
net/core/skbuff.c
··· 791 int end = offset + skb_shinfo(skb)->frags[i].size; 792 if (end > len) { 793 if (skb_cloned(skb)) { 794 - if (!realloc) 795 - BUG(); 796 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 797 return -ENOMEM; 798 } ··· 893 struct sk_buff *insp = NULL; 894 895 do { 896 - if (!list) 897 - BUG(); 898 899 if (list->len <= eat) { 900 /* Eaten as whole. */ ··· 1197 start = end; 1198 } 1199 } 1200 - if (len) 1201 - BUG(); 1202 1203 return csum; 1204 } ··· 1279 start = end; 1280 } 1281 } 1282 - if (len) 1283 - BUG(); 1284 return csum; 1285 } 1286 ··· 1293 else 1294 csstart = skb_headlen(skb); 1295 1296 - if (csstart > skb_headlen(skb)) 1297 - BUG(); 1298 1299 memcpy(to, skb->data, csstart); 1300
··· 791 int end = offset + skb_shinfo(skb)->frags[i].size; 792 if (end > len) { 793 if (skb_cloned(skb)) { 794 + BUG_ON(!realloc); 795 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 796 return -ENOMEM; 797 } ··· 894 struct sk_buff *insp = NULL; 895 896 do { 897 + BUG_ON(!list); 898 899 if (list->len <= eat) { 900 /* Eaten as whole. */ ··· 1199 start = end; 1200 } 1201 } 1202 + BUG_ON(len); 1203 1204 return csum; 1205 } ··· 1282 start = end; 1283 } 1284 } 1285 + BUG_ON(len); 1286 return csum; 1287 } 1288 ··· 1297 else 1298 csstart = skb_headlen(skb); 1299 1300 + BUG_ON(csstart > skb_headlen(skb)); 1301 1302 memcpy(to, skb->data, csstart); 1303
+1 -2
net/ipv4/icmp.c
··· 899 u32 _mask, *mp; 900 901 mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); 902 - if (mp == NULL) 903 - BUG(); 904 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 905 if (*mp == ifa->ifa_mask && 906 inet_ifa_match(rt->rt_src, ifa))
··· 899 u32 _mask, *mp; 900 901 mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); 902 + BUG_ON(mp == NULL); 903 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 904 if (*mp == ifa->ifa_mask && 905 inet_ifa_match(rt->rt_src, ifa))
+160 -83
net/ipv4/inet_diag.c
··· 50 #define INET_DIAG_PUT(skb, attrtype, attrlen) \ 51 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) 52 53 - static int inet_diag_fill(struct sk_buff *skb, struct sock *sk, 54 - int ext, u32 pid, u32 seq, u16 nlmsg_flags, 55 - const struct nlmsghdr *unlh) 56 { 57 const struct inet_sock *inet = inet_sk(sk); 58 const struct inet_connection_sock *icsk = inet_csk(sk); ··· 71 nlh->nlmsg_flags = nlmsg_flags; 72 73 r = NLMSG_DATA(nlh); 74 - if (sk->sk_state != TCP_TIME_WAIT) { 75 - if (ext & (1 << (INET_DIAG_MEMINFO - 1))) 76 - minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, 77 - sizeof(*minfo)); 78 - if (ext & (1 << (INET_DIAG_INFO - 1))) 79 - info = INET_DIAG_PUT(skb, INET_DIAG_INFO, 80 - handler->idiag_info_size); 81 - 82 - if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { 83 - size_t len = strlen(icsk->icsk_ca_ops->name); 84 - strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), 85 - icsk->icsk_ca_ops->name); 86 - } 87 } 88 r->idiag_family = sk->sk_family; 89 r->idiag_state = sk->sk_state; 90 r->idiag_timer = 0; ··· 95 r->id.idiag_if = sk->sk_bound_dev_if; 96 r->id.idiag_cookie[0] = (u32)(unsigned long)sk; 97 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); 98 - 99 - if (r->idiag_state == TCP_TIME_WAIT) { 100 - const struct inet_timewait_sock *tw = inet_twsk(sk); 101 - long tmo = tw->tw_ttd - jiffies; 102 - if (tmo < 0) 103 - tmo = 0; 104 - 105 - r->id.idiag_sport = tw->tw_sport; 106 - r->id.idiag_dport = tw->tw_dport; 107 - r->id.idiag_src[0] = tw->tw_rcv_saddr; 108 - r->id.idiag_dst[0] = tw->tw_daddr; 109 - r->idiag_state = tw->tw_substate; 110 - r->idiag_timer = 3; 111 - r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ; 112 - r->idiag_rqueue = 0; 113 - r->idiag_wqueue = 0; 114 - r->idiag_uid = 0; 115 - r->idiag_inode = 0; 116 - #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 117 - if (r->idiag_family == AF_INET6) { 118 - const struct inet6_timewait_sock *tw6 = inet6_twsk(sk); 119 - 120 - ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, 121 - &tw6->tw_v6_rcv_saddr); 122 - ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, 123 - &tw6->tw_v6_daddr); 124 - } 125 - #endif 126 - nlh->nlmsg_len = skb->tail - b; 127 - return skb->len; 128 - } 129 130 r->id.idiag_sport = inet->sport; 131 r->id.idiag_dport = inet->dport; ··· 157 return -1; 158 } 159 160 - static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh) 161 { 162 int err; 163 struct sock *sk; ··· 275 if (!rep) 276 goto out; 277 278 - if (inet_diag_fill(rep, sk, req->idiag_ext, 279 NETLINK_CB(in_skb).pid, 280 nlh->nlmsg_seq, 0, nlh) <= 0) 281 BUG(); ··· 323 324 325 static int inet_diag_bc_run(const void *bc, int len, 326 - const struct inet_diag_entry *entry) 327 { 328 while (len > 0) { 329 int yes = 1; ··· 362 yes = 0; 363 break; 364 } 365 - 366 if (cond->prefix_len == 0) 367 break; 368 ··· 371 else 372 addr = entry->daddr; 373 374 - if (bitstring_match(addr, cond->addr, cond->prefix_len)) 375 break; 376 if (entry->family == AF_INET6 && 377 cond->family == AF_INET) { ··· 387 } 388 } 389 390 - if (yes) { 391 len -= op->yes; 392 bc += op->yes; 393 } else { ··· 448 default: 449 return -EINVAL; 450 } 451 - bc += op->yes; 452 len -= op->yes; 453 } 454 return len == 0 ? 
0 : -EINVAL; 455 } 456 457 - static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk, 458 - struct netlink_callback *cb) 459 { 460 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 461 ··· 486 return 0; 487 } 488 489 - return inet_diag_fill(skb, sk, r->idiag_ext, NETLINK_CB(cb->skb).pid, 490 - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 491 } 492 493 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, 494 - struct request_sock *req, 495 - u32 pid, u32 seq, 496 - const struct nlmsghdr *unlh) 497 { 498 const struct inet_request_sock *ireq = inet_rsk(req); 499 struct inet_sock *inet = inet_sk(sk); ··· 582 } 583 584 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, 585 - struct netlink_callback *cb) 586 { 587 struct inet_diag_entry entry; 588 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); ··· 634 inet6_rsk(req)->loc_addr.s6_addr32 : 635 #endif 636 &ireq->loc_addr; 637 - entry.daddr = 638 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 639 (entry.family == AF_INET6) ? 640 inet6_rsk(req)->rmt_addr.s6_addr32 : ··· 677 handler = inet_diag_table[cb->nlh->nlmsg_type]; 678 BUG_ON(handler == NULL); 679 hashinfo = handler->idiag_hashinfo; 680 - 681 s_i = cb->args[1]; 682 s_num = num = cb->args[2]; 683 ··· 708 cb->args[3] > 0) 709 goto syn_recv; 710 711 - if (inet_diag_dump_sock(skb, sk, cb) < 0) { 712 inet_listen_unlock(hashinfo); 713 goto done; 714 } ··· 750 s_num = 0; 751 752 read_lock_bh(&head->lock); 753 - 754 num = 0; 755 sk_for_each(sk, node, &head->chain) { 756 struct inet_sock *inet = inet_sk(sk); ··· 761 if (r->id.idiag_sport != inet->sport && 762 r->id.idiag_sport) 763 goto next_normal; 764 - if (r->id.idiag_dport != inet->dport && r->id.idiag_dport) 765 goto next_normal; 766 - if (inet_diag_dump_sock(skb, sk, cb) < 0) { 767 read_unlock_bh(&head->lock); 768 goto done; 769 } ··· 773 } 774 775 if (r->idiag_states & TCPF_TIME_WAIT) { 776 - sk_for_each(sk, node, 777 &hashinfo->ehash[i + hashinfo->ehash_size].chain) { 778 - struct inet_sock *inet = inet_sk(sk); 779 780 if (num < s_num) 781 goto next_dying; 782 - if (r->id.idiag_sport != inet->sport && 783 r->id.idiag_sport) 784 goto next_dying; 785 - if (r->id.idiag_dport != inet->dport && 786 r->id.idiag_dport) 787 goto next_dying; 788 - if (inet_diag_dump_sock(skb, sk, cb) < 0) { 789 read_unlock_bh(&head->lock); 790 goto done; 791 } ··· 803 return skb->len; 804 } 805 806 - static __inline__ int 807 - inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 808 { 809 if (!(nlh->nlmsg_flags&NLM_F_REQUEST)) 810 return 0; ··· 833 } 834 return netlink_dump_start(idiagnl, skb, nlh, 835 inet_diag_dump, NULL); 836 - } else { 837 return inet_diag_get_exact(skb, nlh); 838 - } 839 840 err_inval: 841 return -EINVAL; ··· 843 844 static inline void inet_diag_rcv_skb(struct sk_buff *skb) 845 { 846 - int err; 847 - struct nlmsghdr * nlh; 848 - 849 if (skb->len >= NLMSG_SPACE(0)) { 850 - nlh = (struct nlmsghdr *)skb->data; 851 - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 852 return; 853 err = inet_diag_rcv_msg(skb, nlh); 854 - if (err || nlh->nlmsg_flags & NLM_F_ACK) 855 netlink_ack(skb, nlh, err); 856 } 857 }
··· 50 #define INET_DIAG_PUT(skb, attrtype, attrlen) \ 51 RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) 52 53 + static int inet_csk_diag_fill(struct sock *sk, 54 + struct sk_buff *skb, 55 + int ext, u32 pid, u32 seq, u16 nlmsg_flags, 56 + const struct nlmsghdr *unlh) 57 { 58 const struct inet_sock *inet = inet_sk(sk); 59 const struct inet_connection_sock *icsk = inet_csk(sk); ··· 70 nlh->nlmsg_flags = nlmsg_flags; 71 72 r = NLMSG_DATA(nlh); 73 + BUG_ON(sk->sk_state == TCP_TIME_WAIT); 74 + 75 + if (ext & (1 << (INET_DIAG_MEMINFO - 1))) 76 + minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo)); 77 + 78 + if (ext & (1 << (INET_DIAG_INFO - 1))) 79 + info = INET_DIAG_PUT(skb, INET_DIAG_INFO, 80 + handler->idiag_info_size); 81 + 82 + if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) { 83 + const size_t len = strlen(icsk->icsk_ca_ops->name); 84 + 85 + strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1), 86 + icsk->icsk_ca_ops->name); 87 } 88 + 89 r->idiag_family = sk->sk_family; 90 r->idiag_state = sk->sk_state; 91 r->idiag_timer = 0; ··· 92 r->id.idiag_if = sk->sk_bound_dev_if; 93 r->id.idiag_cookie[0] = (u32)(unsigned long)sk; 94 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); 95 96 r->id.idiag_sport = inet->sport; 97 r->id.idiag_dport = inet->dport; ··· 185 return -1; 186 } 187 188 + static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, 189 + struct sk_buff *skb, int ext, u32 pid, 190 + u32 seq, u16 nlmsg_flags, 191 + const struct nlmsghdr *unlh) 192 + { 193 + long tmo; 194 + struct inet_diag_msg *r; 195 + const unsigned char *previous_tail = skb->tail; 196 + struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, 197 + unlh->nlmsg_type, sizeof(*r)); 198 + 199 + r = NLMSG_DATA(nlh); 200 + BUG_ON(tw->tw_state != TCP_TIME_WAIT); 201 + 202 + nlh->nlmsg_flags = nlmsg_flags; 203 + 204 + tmo = tw->tw_ttd - jiffies; 205 + if (tmo < 0) 206 + tmo = 0; 207 + 208 + r->idiag_family = tw->tw_family; 209 + r->idiag_state = tw->tw_state; 210 + r->idiag_timer = 0; 211 + r->idiag_retrans = 0; 212 + r->id.idiag_if = tw->tw_bound_dev_if; 213 + r->id.idiag_cookie[0] = (u32)(unsigned long)tw; 214 + r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); 215 + r->id.idiag_sport = tw->tw_sport; 216 + r->id.idiag_dport = tw->tw_dport; 217 + r->id.idiag_src[0] = tw->tw_rcv_saddr; 218 + r->id.idiag_dst[0] = tw->tw_daddr; 219 + r->idiag_state = tw->tw_substate; 220 + r->idiag_timer = 3; 221 + r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ; 222 + r->idiag_rqueue = 0; 223 + r->idiag_wqueue = 0; 224 + r->idiag_uid = 0; 225 + r->idiag_inode = 0; 226 + #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 227 + if (tw->tw_family == AF_INET6) { 228 + const struct inet6_timewait_sock *tw6 = 229 + inet6_twsk((struct sock *)tw); 230 + 231 + ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, 232 + &tw6->tw_v6_rcv_saddr); 233 + ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, 234 + &tw6->tw_v6_daddr); 235 + } 236 + #endif 237 + nlh->nlmsg_len = skb->tail - previous_tail; 238 + return skb->len; 239 + nlmsg_failure: 240 + skb_trim(skb, previous_tail - skb->data); 241 + return -1; 242 + } 243 + 244 + static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, 245 + int ext, u32 pid, u32 seq, u16 nlmsg_flags, 246 + const struct nlmsghdr *unlh) 247 + { 248 + if (sk->sk_state == TCP_TIME_WAIT) 249 + return inet_twsk_diag_fill((struct inet_timewait_sock *)sk, 250 + skb, ext, pid, seq, nlmsg_flags, 251 + unlh); 252 + return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh); 253 + } 
254 + 255 + static int inet_diag_get_exact(struct sk_buff *in_skb, 256 + const struct nlmsghdr *nlh) 257 { 258 int err; 259 struct sock *sk; ··· 235 if (!rep) 236 goto out; 237 238 + if (sk_diag_fill(sk, rep, req->idiag_ext, 239 NETLINK_CB(in_skb).pid, 240 nlh->nlmsg_seq, 0, nlh) <= 0) 241 BUG(); ··· 283 284 285 static int inet_diag_bc_run(const void *bc, int len, 286 + const struct inet_diag_entry *entry) 287 { 288 while (len > 0) { 289 int yes = 1; ··· 322 yes = 0; 323 break; 324 } 325 + 326 if (cond->prefix_len == 0) 327 break; 328 ··· 331 else 332 addr = entry->daddr; 333 334 + if (bitstring_match(addr, cond->addr, 335 + cond->prefix_len)) 336 break; 337 if (entry->family == AF_INET6 && 338 cond->family == AF_INET) { ··· 346 } 347 } 348 349 + if (yes) { 350 len -= op->yes; 351 bc += op->yes; 352 } else { ··· 407 default: 408 return -EINVAL; 409 } 410 + bc += op->yes; 411 len -= op->yes; 412 } 413 return len == 0 ? 0 : -EINVAL; 414 } 415 416 + static int inet_csk_diag_dump(struct sock *sk, 417 + struct sk_buff *skb, 418 + struct netlink_callback *cb) 419 { 420 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 421 ··· 444 return 0; 445 } 446 447 + return inet_csk_diag_fill(sk, skb, r->idiag_ext, 448 + NETLINK_CB(cb->skb).pid, 449 + cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 450 + } 451 + 452 + static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, 453 + struct sk_buff *skb, 454 + struct netlink_callback *cb) 455 + { 456 + struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 457 + 458 + if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 459 + struct inet_diag_entry entry; 460 + struct rtattr *bc = (struct rtattr *)(r + 1); 461 + 462 + entry.family = tw->tw_family; 463 + #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 464 + if (tw->tw_family == AF_INET6) { 465 + struct inet6_timewait_sock *tw6 = 466 + inet6_twsk((struct sock *)tw); 467 + entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32; 468 + entry.daddr = tw6->tw_v6_daddr.s6_addr32; 469 + } else 470 + #endif 471 + { 472 + entry.saddr = &tw->tw_rcv_saddr; 473 + entry.daddr = &tw->tw_daddr; 474 + } 475 + entry.sport = tw->tw_num; 476 + entry.dport = ntohs(tw->tw_dport); 477 + entry.userlocks = 0; 478 + 479 + if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 480 + return 0; 481 + } 482 + 483 + return inet_twsk_diag_fill(tw, skb, r->idiag_ext, 484 + NETLINK_CB(cb->skb).pid, 485 + cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); 486 } 487 488 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, 489 + struct request_sock *req, u32 pid, u32 seq, 490 + const struct nlmsghdr *unlh) 491 { 492 const struct inet_request_sock *ireq = inet_rsk(req); 493 struct inet_sock *inet = inet_sk(sk); ··· 504 } 505 506 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, 507 + struct netlink_callback *cb) 508 { 509 struct inet_diag_entry entry; 510 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); ··· 556 inet6_rsk(req)->loc_addr.s6_addr32 : 557 #endif 558 &ireq->loc_addr; 559 + entry.daddr = 560 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 561 (entry.family == AF_INET6) ? 
562 inet6_rsk(req)->rmt_addr.s6_addr32 : ··· 599 handler = inet_diag_table[cb->nlh->nlmsg_type]; 600 BUG_ON(handler == NULL); 601 hashinfo = handler->idiag_hashinfo; 602 + 603 s_i = cb->args[1]; 604 s_num = num = cb->args[2]; 605 ··· 630 cb->args[3] > 0) 631 goto syn_recv; 632 633 + if (inet_csk_diag_dump(sk, skb, cb) < 0) { 634 inet_listen_unlock(hashinfo); 635 goto done; 636 } ··· 672 s_num = 0; 673 674 read_lock_bh(&head->lock); 675 num = 0; 676 sk_for_each(sk, node, &head->chain) { 677 struct inet_sock *inet = inet_sk(sk); ··· 684 if (r->id.idiag_sport != inet->sport && 685 r->id.idiag_sport) 686 goto next_normal; 687 + if (r->id.idiag_dport != inet->dport && 688 + r->id.idiag_dport) 689 goto next_normal; 690 + if (inet_csk_diag_dump(sk, skb, cb) < 0) { 691 read_unlock_bh(&head->lock); 692 goto done; 693 } ··· 695 } 696 697 if (r->idiag_states & TCPF_TIME_WAIT) { 698 + struct inet_timewait_sock *tw; 699 + 700 + inet_twsk_for_each(tw, node, 701 &hashinfo->ehash[i + hashinfo->ehash_size].chain) { 702 703 if (num < s_num) 704 goto next_dying; 705 + if (r->id.idiag_sport != tw->tw_sport && 706 r->id.idiag_sport) 707 goto next_dying; 708 + if (r->id.idiag_dport != tw->tw_dport && 709 r->id.idiag_dport) 710 goto next_dying; 711 + if (inet_twsk_diag_dump(tw, skb, cb) < 0) { 712 read_unlock_bh(&head->lock); 713 goto done; 714 } ··· 724 return skb->len; 725 } 726 727 + static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 728 { 729 if (!(nlh->nlmsg_flags&NLM_F_REQUEST)) 730 return 0; ··· 755 } 756 return netlink_dump_start(idiagnl, skb, nlh, 757 inet_diag_dump, NULL); 758 + } else 759 return inet_diag_get_exact(skb, nlh); 760 761 err_inval: 762 return -EINVAL; ··· 766 767 static inline void inet_diag_rcv_skb(struct sk_buff *skb) 768 { 769 if (skb->len >= NLMSG_SPACE(0)) { 770 + int err; 771 + struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; 772 + 773 + if (nlh->nlmsg_len < sizeof(*nlh) || 774 + skb->len < nlh->nlmsg_len) 775 return; 776 err = inet_diag_rcv_msg(skb, nlh); 777 + if (err || nlh->nlmsg_flags & NLM_F_ACK) 778 netlink_ack(skb, nlh, err); 779 } 780 }
+2 -4
net/ipv4/inetpeer.c
··· 304 /* look for a node to insert instead of p */ 305 struct inet_peer *t; 306 t = lookup_rightempty(p); 307 - if (*stackptr[-1] != t) 308 - BUG(); 309 **--stackptr = t->avl_left; 310 /* t is removed, t->v4daddr > x->v4daddr for any 311 * x in p->avl_left subtree. ··· 313 t->avl_left = p->avl_left; 314 t->avl_right = p->avl_right; 315 t->avl_height = p->avl_height; 316 - if (delp[1] != &p->avl_left) 317 - BUG(); 318 delp[1] = &t->avl_left; /* was &p->avl_left */ 319 } 320 peer_avl_rebalance(stack, stackptr);
··· 304 /* look for a node to insert instead of p */ 305 struct inet_peer *t; 306 t = lookup_rightempty(p); 307 + BUG_ON(*stackptr[-1] != t); 308 **--stackptr = t->avl_left; 309 /* t is removed, t->v4daddr > x->v4daddr for any 310 * x in p->avl_left subtree. ··· 314 t->avl_left = p->avl_left; 315 t->avl_right = p->avl_right; 316 t->avl_height = p->avl_height; 317 + BUG_ON(delp[1] != &p->avl_left); 318 delp[1] = &t->avl_left; /* was &p->avl_left */ 319 } 320 peer_avl_rebalance(stack, stackptr);
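The inetpeer.c hunk folds open-coded "if (cond) BUG();" checks into BUG_ON(cond); the same conversion recurs below in tcp_input.c, af_key.c, sm_sideeffect.c, cache.c, svc.c, xfrm_algo.c and xfrm_policy.c. A minimal userspace sketch of the equivalence, using abort()-based stand-ins for the kernel macros (which expand to essentially the same "if (cond) BUG();" check):

/* Stand-alone illustration; BUG() and BUG_ON() here are userspace stand-ins. */
#include <stdio.h>
#include <stdlib.h>

#define BUG()        do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
#define BUG_ON(cond) do { if (cond) BUG(); } while (0)

int main(void)
{
	int *p = malloc(sizeof(*p));

	/* open-coded form, as in the old inetpeer.c */
	if (p == NULL)
		BUG();

	/* macro form, as in the new code: same check, one line, and the
	 * condition reads as an assertion rather than control flow */
	BUG_ON(p == NULL);

	*p = 42;
	printf("%d\n", *p);
	free(p);
	return 0;
}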
+15 -18
net/ipv4/ip_gre.c
··· 188 } 189 190 if (ipgre_fb_tunnel_dev->flags&IFF_UP) 191 - return ipgre_fb_tunnel_dev->priv; 192 return NULL; 193 } 194 ··· 278 return NULL; 279 280 dev->init = ipgre_tunnel_init; 281 - nt = dev->priv; 282 nt->parms = *parms; 283 284 if (register_netdevice(dev) < 0) { 285 free_netdev(dev); 286 goto failed; 287 } 288 - 289 - nt = dev->priv; 290 - nt->parms = *parms; 291 292 dev_hold(dev); 293 ipgre_tunnel_link(nt); ··· 296 297 static void ipgre_tunnel_uninit(struct net_device *dev) 298 { 299 - ipgre_tunnel_unlink((struct ip_tunnel*)dev->priv); 300 dev_put(dev); 301 } 302 ··· 515 skb2->dst->ops->update_pmtu(skb2->dst, rel_info); 516 rel_info = htonl(rel_info); 517 } else if (type == ICMP_TIME_EXCEEDED) { 518 - struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; 519 if (t->parms.iph.ttl) { 520 rel_type = ICMP_DEST_UNREACH; 521 rel_code = ICMP_HOST_UNREACH; ··· 666 667 static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 668 { 669 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 670 struct net_device_stats *stats = &tunnel->stat; 671 struct iphdr *old_iph = skb->nh.iph; 672 struct iphdr *tiph; ··· 912 t = ipgre_tunnel_locate(&p, 0); 913 } 914 if (t == NULL) 915 - t = (struct ip_tunnel*)dev->priv; 916 memcpy(&p, &t->parms, sizeof(p)); 917 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 918 err = -EFAULT; ··· 952 } else { 953 unsigned nflags=0; 954 955 - t = (struct ip_tunnel*)dev->priv; 956 957 if (MULTICAST(p.iph.daddr)) 958 nflags = IFF_BROADCAST; ··· 1001 if ((t = ipgre_tunnel_locate(&p, 0)) == NULL) 1002 goto done; 1003 err = -EPERM; 1004 - if (t == ipgre_fb_tunnel_dev->priv) 1005 goto done; 1006 dev = t->dev; 1007 } ··· 1018 1019 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev) 1020 { 1021 - return &(((struct ip_tunnel*)dev->priv)->stat); 1022 } 1023 1024 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 1025 { 1026 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 1027 if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen) 1028 return -EINVAL; 1029 dev->mtu = new_mtu; ··· 1063 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, 1064 void *daddr, void *saddr, unsigned len) 1065 { 1066 - struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; 1067 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 1068 u16 *p = (u16*)(iph+1); 1069 ··· 1090 1091 static int ipgre_open(struct net_device *dev) 1092 { 1093 - struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; 1094 1095 if (MULTICAST(t->parms.iph.daddr)) { 1096 struct flowi fl = { .oif = t->parms.link, ··· 1114 1115 static int ipgre_close(struct net_device *dev) 1116 { 1117 - struct ip_tunnel *t = (struct ip_tunnel*)dev->priv; 1118 if (MULTICAST(t->parms.iph.daddr) && t->mlink) { 1119 struct in_device *in_dev = inetdev_by_index(t->mlink); 1120 if (in_dev) { ··· 1154 int mtu = ETH_DATA_LEN; 1155 int addend = sizeof(struct iphdr) + 4; 1156 1157 - tunnel = (struct ip_tunnel*)dev->priv; 1158 iph = &tunnel->parms.iph; 1159 1160 tunnel->dev = dev; ··· 1218 1219 static int __init ipgre_fb_tunnel_init(struct net_device *dev) 1220 { 1221 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 1222 struct iphdr *iph = &tunnel->parms.iph; 1223 1224 tunnel->dev = dev;
··· 188 } 189 190 if (ipgre_fb_tunnel_dev->flags&IFF_UP) 191 + return netdev_priv(ipgre_fb_tunnel_dev); 192 return NULL; 193 } 194 ··· 278 return NULL; 279 280 dev->init = ipgre_tunnel_init; 281 + nt = netdev_priv(dev); 282 nt->parms = *parms; 283 284 if (register_netdevice(dev) < 0) { 285 free_netdev(dev); 286 goto failed; 287 } 288 289 dev_hold(dev); 290 ipgre_tunnel_link(nt); ··· 299 300 static void ipgre_tunnel_uninit(struct net_device *dev) 301 { 302 + ipgre_tunnel_unlink(netdev_priv(dev)); 303 dev_put(dev); 304 } 305 ··· 518 skb2->dst->ops->update_pmtu(skb2->dst, rel_info); 519 rel_info = htonl(rel_info); 520 } else if (type == ICMP_TIME_EXCEEDED) { 521 + struct ip_tunnel *t = netdev_priv(skb2->dev); 522 if (t->parms.iph.ttl) { 523 rel_type = ICMP_DEST_UNREACH; 524 rel_code = ICMP_HOST_UNREACH; ··· 669 670 static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 671 { 672 + struct ip_tunnel *tunnel = netdev_priv(dev); 673 struct net_device_stats *stats = &tunnel->stat; 674 struct iphdr *old_iph = skb->nh.iph; 675 struct iphdr *tiph; ··· 915 t = ipgre_tunnel_locate(&p, 0); 916 } 917 if (t == NULL) 918 + t = netdev_priv(dev); 919 memcpy(&p, &t->parms, sizeof(p)); 920 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 921 err = -EFAULT; ··· 955 } else { 956 unsigned nflags=0; 957 958 + t = netdev_priv(dev); 959 960 if (MULTICAST(p.iph.daddr)) 961 nflags = IFF_BROADCAST; ··· 1004 if ((t = ipgre_tunnel_locate(&p, 0)) == NULL) 1005 goto done; 1006 err = -EPERM; 1007 + if (t == netdev_priv(ipgre_fb_tunnel_dev)) 1008 goto done; 1009 dev = t->dev; 1010 } ··· 1021 1022 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev) 1023 { 1024 + return &(((struct ip_tunnel*)netdev_priv(dev))->stat); 1025 } 1026 1027 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 1028 { 1029 + struct ip_tunnel *tunnel = netdev_priv(dev); 1030 if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen) 1031 return -EINVAL; 1032 dev->mtu = new_mtu; ··· 1066 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, 1067 void *daddr, void *saddr, unsigned len) 1068 { 1069 + struct ip_tunnel *t = netdev_priv(dev); 1070 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 1071 u16 *p = (u16*)(iph+1); 1072 ··· 1093 1094 static int ipgre_open(struct net_device *dev) 1095 { 1096 + struct ip_tunnel *t = netdev_priv(dev); 1097 1098 if (MULTICAST(t->parms.iph.daddr)) { 1099 struct flowi fl = { .oif = t->parms.link, ··· 1117 1118 static int ipgre_close(struct net_device *dev) 1119 { 1120 + struct ip_tunnel *t = netdev_priv(dev); 1121 if (MULTICAST(t->parms.iph.daddr) && t->mlink) { 1122 struct in_device *in_dev = inetdev_by_index(t->mlink); 1123 if (in_dev) { ··· 1157 int mtu = ETH_DATA_LEN; 1158 int addend = sizeof(struct iphdr) + 4; 1159 1160 + tunnel = netdev_priv(dev); 1161 iph = &tunnel->parms.iph; 1162 1163 tunnel->dev = dev; ··· 1221 1222 static int __init ipgre_fb_tunnel_init(struct net_device *dev) 1223 { 1224 + struct ip_tunnel *tunnel = netdev_priv(dev); 1225 struct iphdr *iph = &tunnel->parms.iph; 1226 1227 tunnel->dev = dev;
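ip_gre.c is converted from dereferencing dev->priv to the netdev_priv(dev) accessor, a change repeated below for ipip.c, ipmr.c, ip6_tunnel.c, sit.c and sch_teql.c. The accessor works because alloc_netdev() lays the driver-private area out directly behind the aligned struct net_device, so its address can be computed from dev alone. A stand-alone sketch of that layout follows; the struct definitions and the 32-byte alignment are illustrative stand-ins, not the kernel's exact definitions.

/* Sketch of the accessor pattern behind netdev_priv(); not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PRIV_ALIGN 32			/* illustrative stand-in for the kernel's alignment */

struct fake_net_device {
	char name[16];
	/* ... the rest of struct net_device ... */
};

struct fake_tunnel {			/* plays the role of struct ip_tunnel */
	unsigned int saddr, daddr;
};

static void *fake_netdev_priv(struct fake_net_device *dev)
{
	size_t off = (sizeof(*dev) + PRIV_ALIGN - 1) & ~(size_t)(PRIV_ALIGN - 1);
	return (char *)dev + off;	/* private area sits right after the device */
}

static struct fake_net_device *fake_alloc_netdev(size_t priv_size)
{
	size_t off = (sizeof(struct fake_net_device) + PRIV_ALIGN - 1) &
		     ~(size_t)(PRIV_ALIGN - 1);
	return calloc(1, off + priv_size);
}

int main(void)
{
	struct fake_net_device *dev = fake_alloc_netdev(sizeof(struct fake_tunnel));
	struct fake_tunnel *t;

	if (!dev)
		return 1;
	t = fake_netdev_priv(dev);
	strcpy(dev->name, "gre0");
	t->saddr = 1;
	t->daddr = 2;
	printf("%s priv at +%zu bytes\n", dev->name,
	       (size_t)((char *)t - (char *)dev));
	free(dev);
	return 0;
}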
+1
net/ipv4/ip_output.c
··· 69 #include <net/ip.h> 70 #include <net/protocol.h> 71 #include <net/route.h> 72 #include <linux/skbuff.h> 73 #include <net/sock.h> 74 #include <net/arp.h>
··· 69 #include <net/ip.h> 70 #include <net/protocol.h> 71 #include <net/route.h> 72 + #include <net/xfrm.h> 73 #include <linux/skbuff.h> 74 #include <net/sock.h> 75 #include <net/arp.h>
+9 -9
net/ipv4/ipip.c
··· 244 if (dev == NULL) 245 return NULL; 246 247 - nt = dev->priv; 248 SET_MODULE_OWNER(dev); 249 dev->init = ipip_tunnel_init; 250 nt->parms = *parms; ··· 269 tunnels_wc[0] = NULL; 270 write_unlock_bh(&ipip_lock); 271 } else 272 - ipip_tunnel_unlink((struct ip_tunnel*)dev->priv); 273 dev_put(dev); 274 } 275 ··· 443 skb2->dst->ops->update_pmtu(skb2->dst, rel_info); 444 rel_info = htonl(rel_info); 445 } else if (type == ICMP_TIME_EXCEEDED) { 446 - struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv; 447 if (t->parms.iph.ttl) { 448 rel_type = ICMP_DEST_UNREACH; 449 rel_code = ICMP_HOST_UNREACH; ··· 514 515 static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 516 { 517 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 518 struct net_device_stats *stats = &tunnel->stat; 519 struct iphdr *tiph = &tunnel->parms.iph; 520 u8 tos = tunnel->parms.iph.tos; ··· 674 t = ipip_tunnel_locate(&p, 0); 675 } 676 if (t == NULL) 677 - t = (struct ip_tunnel*)dev->priv; 678 memcpy(&p, &t->parms, sizeof(p)); 679 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 680 err = -EFAULT; ··· 711 err = -EINVAL; 712 break; 713 } 714 - t = (struct ip_tunnel*)dev->priv; 715 ipip_tunnel_unlink(t); 716 t->parms.iph.saddr = p.iph.saddr; 717 t->parms.iph.daddr = p.iph.daddr; ··· 765 766 static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev) 767 { 768 - return &(((struct ip_tunnel*)dev->priv)->stat); 769 } 770 771 static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) ··· 800 struct ip_tunnel *tunnel; 801 struct iphdr *iph; 802 803 - tunnel = (struct ip_tunnel*)dev->priv; 804 iph = &tunnel->parms.iph; 805 806 tunnel->dev = dev; ··· 838 839 static int __init ipip_fb_tunnel_init(struct net_device *dev) 840 { 841 - struct ip_tunnel *tunnel = dev->priv; 842 struct iphdr *iph = &tunnel->parms.iph; 843 844 tunnel->dev = dev;
··· 244 if (dev == NULL) 245 return NULL; 246 247 + nt = netdev_priv(dev); 248 SET_MODULE_OWNER(dev); 249 dev->init = ipip_tunnel_init; 250 nt->parms = *parms; ··· 269 tunnels_wc[0] = NULL; 270 write_unlock_bh(&ipip_lock); 271 } else 272 + ipip_tunnel_unlink(netdev_priv(dev)); 273 dev_put(dev); 274 } 275 ··· 443 skb2->dst->ops->update_pmtu(skb2->dst, rel_info); 444 rel_info = htonl(rel_info); 445 } else if (type == ICMP_TIME_EXCEEDED) { 446 + struct ip_tunnel *t = netdev_priv(skb2->dev); 447 if (t->parms.iph.ttl) { 448 rel_type = ICMP_DEST_UNREACH; 449 rel_code = ICMP_HOST_UNREACH; ··· 514 515 static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 516 { 517 + struct ip_tunnel *tunnel = netdev_priv(dev); 518 struct net_device_stats *stats = &tunnel->stat; 519 struct iphdr *tiph = &tunnel->parms.iph; 520 u8 tos = tunnel->parms.iph.tos; ··· 674 t = ipip_tunnel_locate(&p, 0); 675 } 676 if (t == NULL) 677 + t = netdev_priv(dev); 678 memcpy(&p, &t->parms, sizeof(p)); 679 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 680 err = -EFAULT; ··· 711 err = -EINVAL; 712 break; 713 } 714 + t = netdev_priv(dev); 715 ipip_tunnel_unlink(t); 716 t->parms.iph.saddr = p.iph.saddr; 717 t->parms.iph.daddr = p.iph.daddr; ··· 765 766 static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev) 767 { 768 + return &(((struct ip_tunnel*)netdev_priv(dev))->stat); 769 } 770 771 static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) ··· 800 struct ip_tunnel *tunnel; 801 struct iphdr *iph; 802 803 + tunnel = netdev_priv(dev); 804 iph = &tunnel->parms.iph; 805 806 tunnel->dev = dev; ··· 838 839 static int __init ipip_fb_tunnel_init(struct net_device *dev) 840 { 841 + struct ip_tunnel *tunnel = netdev_priv(dev); 842 struct iphdr *iph = &tunnel->parms.iph; 843 844 tunnel->dev = dev;
+11 -11
net/ipv4/ipmr.c
··· 178 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 179 { 180 read_lock(&mrt_lock); 181 - ((struct net_device_stats*)dev->priv)->tx_bytes += skb->len; 182 - ((struct net_device_stats*)dev->priv)->tx_packets++; 183 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); 184 read_unlock(&mrt_lock); 185 kfree_skb(skb); ··· 188 189 static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) 190 { 191 - return (struct net_device_stats*)dev->priv; 192 } 193 194 static void reg_vif_setup(struct net_device *dev) ··· 1149 if (vif->flags & VIFF_REGISTER) { 1150 vif->pkt_out++; 1151 vif->bytes_out+=skb->len; 1152 - ((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len; 1153 - ((struct net_device_stats*)vif->dev->priv)->tx_packets++; 1154 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); 1155 kfree_skb(skb); 1156 return; ··· 1210 if (vif->flags & VIFF_TUNNEL) { 1211 ip_encap(skb, vif->local, vif->remote); 1212 /* FIXME: extra output firewall step used to be here. --RR */ 1213 - ((struct ip_tunnel *)vif->dev->priv)->stat.tx_packets++; 1214 - ((struct ip_tunnel *)vif->dev->priv)->stat.tx_bytes+=skb->len; 1215 } 1216 1217 IPCB(skb)->flags |= IPSKB_FORWARDED; ··· 1467 skb->pkt_type = PACKET_HOST; 1468 dst_release(skb->dst); 1469 skb->dst = NULL; 1470 - ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; 1471 - ((struct net_device_stats*)reg_dev->priv)->rx_packets++; 1472 nf_reset(skb); 1473 netif_rx(skb); 1474 dev_put(reg_dev); ··· 1522 skb->ip_summed = 0; 1523 skb->pkt_type = PACKET_HOST; 1524 dst_release(skb->dst); 1525 - ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len; 1526 - ((struct net_device_stats*)reg_dev->priv)->rx_packets++; 1527 skb->dst = NULL; 1528 nf_reset(skb); 1529 netif_rx(skb);
··· 178 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 179 { 180 read_lock(&mrt_lock); 181 + ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len; 182 + ((struct net_device_stats*)netdev_priv(dev))->tx_packets++; 183 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT); 184 read_unlock(&mrt_lock); 185 kfree_skb(skb); ··· 188 189 static struct net_device_stats *reg_vif_get_stats(struct net_device *dev) 190 { 191 + return (struct net_device_stats*)netdev_priv(dev); 192 } 193 194 static void reg_vif_setup(struct net_device *dev) ··· 1149 if (vif->flags & VIFF_REGISTER) { 1150 vif->pkt_out++; 1151 vif->bytes_out+=skb->len; 1152 + ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len; 1153 + ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++; 1154 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT); 1155 kfree_skb(skb); 1156 return; ··· 1210 if (vif->flags & VIFF_TUNNEL) { 1211 ip_encap(skb, vif->local, vif->remote); 1212 /* FIXME: extra output firewall step used to be here. --RR */ 1213 + ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++; 1214 + ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len; 1215 } 1216 1217 IPCB(skb)->flags |= IPSKB_FORWARDED; ··· 1467 skb->pkt_type = PACKET_HOST; 1468 dst_release(skb->dst); 1469 skb->dst = NULL; 1470 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1471 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1472 nf_reset(skb); 1473 netif_rx(skb); 1474 dev_put(reg_dev); ··· 1522 skb->ip_summed = 0; 1523 skb->pkt_type = PACKET_HOST; 1524 dst_release(skb->dst); 1525 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len; 1526 + ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++; 1527 skb->dst = NULL; 1528 nf_reset(skb); 1529 netif_rx(skb);
+1 -1
net/ipv4/tcp_input.c
··· 3347 int offset = start - TCP_SKB_CB(skb)->seq; 3348 int size = TCP_SKB_CB(skb)->end_seq - start; 3349 3350 - if (offset < 0) BUG(); 3351 if (size > 0) { 3352 size = min(copy, size); 3353 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
··· 3347 int offset = start - TCP_SKB_CB(skb)->seq; 3348 int size = TCP_SKB_CB(skb)->end_seq - start; 3349 3350 + BUG_ON(offset < 0); 3351 if (size > 0) { 3352 size = min(copy, size); 3353 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
+4
net/ipv6/ip6_output.c
··· 226 ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); 227 ipv6_addr_copy(&hdr->daddr, first_hop); 228 229 mtu = dst_mtu(dst); 230 if ((skb->len <= mtu) || ipfragok) { 231 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); ··· 1183 hdr->nexthdr = proto; 1184 ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); 1185 ipv6_addr_copy(&hdr->daddr, final_dst); 1186 1187 skb->dst = dst_clone(&rt->u.dst); 1188 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
··· 226 ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); 227 ipv6_addr_copy(&hdr->daddr, first_hop); 228 229 + skb->priority = sk->sk_priority; 230 + 231 mtu = dst_mtu(dst); 232 if ((skb->len <= mtu) || ipfragok) { 233 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); ··· 1181 hdr->nexthdr = proto; 1182 ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); 1183 ipv6_addr_copy(&hdr->daddr, final_dst); 1184 + 1185 + skb->priority = sk->sk_priority; 1186 1187 skb->dst = dst_clone(&rt->u.dst); 1188 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
+12 -12
net/ipv6/ip6_tunnel.c
··· 243 if (dev == NULL) 244 return -ENOMEM; 245 246 - t = dev->priv; 247 dev->init = ip6ip6_tnl_dev_init; 248 t->parms = *p; 249 ··· 308 static void 309 ip6ip6_tnl_dev_uninit(struct net_device *dev) 310 { 311 - struct ip6_tnl *t = dev->priv; 312 313 if (dev == ip6ip6_fb_tnl_dev) { 314 write_lock_bh(&ip6ip6_lock); ··· 623 static int 624 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 625 { 626 - struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; 627 struct net_device_stats *stats = &t->stat; 628 struct ipv6hdr *ipv6h = skb->nh.ipv6h; 629 struct ipv6_txoptions *opt = NULL; ··· 933 break; 934 } 935 if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV) 936 - t = (struct ip6_tnl *) dev->priv; 937 else if (err) 938 break; 939 } else 940 - t = (struct ip6_tnl *) dev->priv; 941 942 memcpy(&p, &t->parms, sizeof (p)); 943 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { ··· 955 break; 956 } 957 if (!create && dev != ip6ip6_fb_tnl_dev) { 958 - t = (struct ip6_tnl *) dev->priv; 959 } 960 if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) { 961 break; ··· 991 err = ip6ip6_tnl_locate(&p, &t, 0); 992 if (err) 993 break; 994 - if (t == ip6ip6_fb_tnl_dev->priv) { 995 err = -EPERM; 996 break; 997 } 998 } else { 999 - t = (struct ip6_tnl *) dev->priv; 1000 } 1001 err = unregister_netdevice(t->dev); 1002 break; ··· 1016 static struct net_device_stats * 1017 ip6ip6_tnl_get_stats(struct net_device *dev) 1018 { 1019 - return &(((struct ip6_tnl *) dev->priv)->stat); 1020 } 1021 1022 /** ··· 1073 static inline void 1074 ip6ip6_tnl_dev_init_gen(struct net_device *dev) 1075 { 1076 - struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; 1077 t->fl.proto = IPPROTO_IPV6; 1078 t->dev = dev; 1079 strcpy(t->parms.name, dev->name); ··· 1087 static int 1088 ip6ip6_tnl_dev_init(struct net_device *dev) 1089 { 1090 - struct ip6_tnl *t = (struct ip6_tnl *) dev->priv; 1091 ip6ip6_tnl_dev_init_gen(dev); 1092 ip6ip6_tnl_link_config(t); 1093 return 0; ··· 1103 static int 1104 ip6ip6_fb_tnl_dev_init(struct net_device *dev) 1105 { 1106 - struct ip6_tnl *t = dev->priv; 1107 ip6ip6_tnl_dev_init_gen(dev); 1108 dev_hold(dev); 1109 tnls_wc[0] = t;
··· 243 if (dev == NULL) 244 return -ENOMEM; 245 246 + t = netdev_priv(dev); 247 dev->init = ip6ip6_tnl_dev_init; 248 t->parms = *p; 249 ··· 308 static void 309 ip6ip6_tnl_dev_uninit(struct net_device *dev) 310 { 311 + struct ip6_tnl *t = netdev_priv(dev); 312 313 if (dev == ip6ip6_fb_tnl_dev) { 314 write_lock_bh(&ip6ip6_lock); ··· 623 static int 624 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 625 { 626 + struct ip6_tnl *t = netdev_priv(dev); 627 struct net_device_stats *stats = &t->stat; 628 struct ipv6hdr *ipv6h = skb->nh.ipv6h; 629 struct ipv6_txoptions *opt = NULL; ··· 933 break; 934 } 935 if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV) 936 + t = netdev_priv(dev); 937 else if (err) 938 break; 939 } else 940 + t = netdev_priv(dev); 941 942 memcpy(&p, &t->parms, sizeof (p)); 943 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { ··· 955 break; 956 } 957 if (!create && dev != ip6ip6_fb_tnl_dev) { 958 + t = netdev_priv(dev); 959 } 960 if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) { 961 break; ··· 991 err = ip6ip6_tnl_locate(&p, &t, 0); 992 if (err) 993 break; 994 + if (t == netdev_priv(ip6ip6_fb_tnl_dev)) { 995 err = -EPERM; 996 break; 997 } 998 } else { 999 + t = netdev_priv(dev); 1000 } 1001 err = unregister_netdevice(t->dev); 1002 break; ··· 1016 static struct net_device_stats * 1017 ip6ip6_tnl_get_stats(struct net_device *dev) 1018 { 1019 + return &(((struct ip6_tnl *)netdev_priv(dev))->stat); 1020 } 1021 1022 /** ··· 1073 static inline void 1074 ip6ip6_tnl_dev_init_gen(struct net_device *dev) 1075 { 1076 + struct ip6_tnl *t = netdev_priv(dev); 1077 t->fl.proto = IPPROTO_IPV6; 1078 t->dev = dev; 1079 strcpy(t->parms.name, dev->name); ··· 1087 static int 1088 ip6ip6_tnl_dev_init(struct net_device *dev) 1089 { 1090 + struct ip6_tnl *t = netdev_priv(dev); 1091 ip6ip6_tnl_dev_init_gen(dev); 1092 ip6ip6_tnl_link_config(t); 1093 return 0; ··· 1103 static int 1104 ip6ip6_fb_tnl_dev_init(struct net_device *dev) 1105 { 1106 + struct ip6_tnl *t = netdev_priv(dev); 1107 ip6ip6_tnl_dev_init_gen(dev); 1108 dev_hold(dev); 1109 tnls_wc[0] = t;
+10 -10
net/ipv6/sit.c
··· 184 if (dev == NULL) 185 return NULL; 186 187 - nt = dev->priv; 188 dev->init = ipip6_tunnel_init; 189 nt->parms = *parms; 190 ··· 210 write_unlock_bh(&ipip6_lock); 211 dev_put(dev); 212 } else { 213 - ipip6_tunnel_unlink((struct ip_tunnel*)dev->priv); 214 dev_put(dev); 215 } 216 } ··· 346 rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); 347 348 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { 349 - struct ip_tunnel * t = (struct ip_tunnel*)rt6i->rt6i_dev->priv; 350 if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) { 351 rel_type = ICMPV6_DEST_UNREACH; 352 rel_code = ICMPV6_ADDR_UNREACH; ··· 424 425 static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 426 { 427 - struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv; 428 struct net_device_stats *stats = &tunnel->stat; 429 struct iphdr *tiph = &tunnel->parms.iph; 430 struct ipv6hdr *iph6 = skb->nh.ipv6h; ··· 610 t = ipip6_tunnel_locate(&p, 0); 611 } 612 if (t == NULL) 613 - t = (struct ip_tunnel*)dev->priv; 614 memcpy(&p, &t->parms, sizeof(p)); 615 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 616 err = -EFAULT; ··· 647 err = -EINVAL; 648 break; 649 } 650 - t = (struct ip_tunnel*)dev->priv; 651 ipip6_tunnel_unlink(t); 652 t->parms.iph.saddr = p.iph.saddr; 653 t->parms.iph.daddr = p.iph.daddr; ··· 683 if ((t = ipip6_tunnel_locate(&p, 0)) == NULL) 684 goto done; 685 err = -EPERM; 686 - if (t == ipip6_fb_tunnel_dev->priv) 687 goto done; 688 dev = t->dev; 689 } ··· 700 701 static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev) 702 { 703 - return &(((struct ip_tunnel*)dev->priv)->stat); 704 } 705 706 static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) ··· 735 struct ip_tunnel *tunnel; 736 struct iphdr *iph; 737 738 - tunnel = (struct ip_tunnel*)dev->priv; 739 iph = &tunnel->parms.iph; 740 741 tunnel->dev = dev; ··· 775 776 static int __init ipip6_fb_tunnel_init(struct net_device *dev) 777 { 778 - struct ip_tunnel *tunnel = dev->priv; 779 struct iphdr *iph = &tunnel->parms.iph; 780 781 tunnel->dev = dev;
··· 184 if (dev == NULL) 185 return NULL; 186 187 + nt = netdev_priv(dev); 188 dev->init = ipip6_tunnel_init; 189 nt->parms = *parms; 190 ··· 210 write_unlock_bh(&ipip6_lock); 211 dev_put(dev); 212 } else { 213 + ipip6_tunnel_unlink(netdev_priv(dev)); 214 dev_put(dev); 215 } 216 } ··· 346 rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); 347 348 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { 349 + struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); 350 if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) { 351 rel_type = ICMPV6_DEST_UNREACH; 352 rel_code = ICMPV6_ADDR_UNREACH; ··· 424 425 static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 426 { 427 + struct ip_tunnel *tunnel = netdev_priv(dev); 428 struct net_device_stats *stats = &tunnel->stat; 429 struct iphdr *tiph = &tunnel->parms.iph; 430 struct ipv6hdr *iph6 = skb->nh.ipv6h; ··· 610 t = ipip6_tunnel_locate(&p, 0); 611 } 612 if (t == NULL) 613 + t = netdev_priv(dev); 614 memcpy(&p, &t->parms, sizeof(p)); 615 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 616 err = -EFAULT; ··· 647 err = -EINVAL; 648 break; 649 } 650 + t = netdev_priv(dev); 651 ipip6_tunnel_unlink(t); 652 t->parms.iph.saddr = p.iph.saddr; 653 t->parms.iph.daddr = p.iph.daddr; ··· 683 if ((t = ipip6_tunnel_locate(&p, 0)) == NULL) 684 goto done; 685 err = -EPERM; 686 + if (t == netdev_priv(ipip6_fb_tunnel_dev)) 687 goto done; 688 dev = t->dev; 689 } ··· 700 701 static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev) 702 { 703 + return &(((struct ip_tunnel*)netdev_priv(dev))->stat); 704 } 705 706 static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) ··· 735 struct ip_tunnel *tunnel; 736 struct iphdr *iph; 737 738 + tunnel = netdev_priv(dev); 739 iph = &tunnel->parms.iph; 740 741 tunnel->dev = dev; ··· 775 776 static int __init ipip6_fb_tunnel_init(struct net_device *dev) 777 { 778 + struct ip_tunnel *tunnel = netdev_priv(dev); 779 struct iphdr *iph = &tunnel->parms.iph; 780 781 tunnel->dev = dev;
+1 -2
net/key/af_key.c
··· 297 err = EINTR; 298 if (err >= 512) 299 err = EINVAL; 300 - if (err <= 0 || err >= 256) 301 - BUG(); 302 303 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); 304 pfkey_hdr_dup(hdr, orig);
··· 297 err = EINTR; 298 if (err >= 512) 299 err = EINVAL; 300 + BUG_ON(err <= 0 || err >= 256); 301 302 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); 303 pfkey_hdr_dup(hdr, orig);
+7 -7
net/sched/Makefile
··· 7 obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o 8 obj-$(CONFIG_NET_CLS) += cls_api.o 9 obj-$(CONFIG_NET_CLS_ACT) += act_api.o 10 - obj-$(CONFIG_NET_ACT_POLICE) += police.o 11 - obj-$(CONFIG_NET_CLS_POLICE) += police.o 12 - obj-$(CONFIG_NET_ACT_GACT) += gact.o 13 - obj-$(CONFIG_NET_ACT_MIRRED) += mirred.o 14 - obj-$(CONFIG_NET_ACT_IPT) += ipt.o 15 - obj-$(CONFIG_NET_ACT_PEDIT) += pedit.o 16 - obj-$(CONFIG_NET_ACT_SIMP) += simple.o 17 obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o 18 obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o 19 obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
··· 7 obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o 8 obj-$(CONFIG_NET_CLS) += cls_api.o 9 obj-$(CONFIG_NET_CLS_ACT) += act_api.o 10 + obj-$(CONFIG_NET_ACT_POLICE) += act_police.o 11 + obj-$(CONFIG_NET_CLS_POLICE) += act_police.o 12 + obj-$(CONFIG_NET_ACT_GACT) += act_gact.o 13 + obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o 14 + obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o 15 + obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o 16 + obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o 17 obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o 18 obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o 19 obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
+2 -2
net/sched/act_api.c
··· 165 while ((a = act) != NULL) { 166 repeat: 167 if (a->ops && a->ops->act) { 168 - ret = a->ops->act(&skb, a, res); 169 if (TC_MUNGED & skb->tc_verd) { 170 /* copied already, allow trampling */ 171 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); ··· 290 if (a_o == NULL) { 291 #ifdef CONFIG_KMOD 292 rtnl_unlock(); 293 - request_module(act_name); 294 rtnl_lock(); 295 296 a_o = tc_lookup_action_n(act_name);
··· 165 while ((a = act) != NULL) { 166 repeat: 167 if (a->ops && a->ops->act) { 168 + ret = a->ops->act(skb, a, res); 169 if (TC_MUNGED & skb->tc_verd) { 170 /* copied already, allow trampling */ 171 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); ··· 290 if (a_o == NULL) { 291 #ifdef CONFIG_KMOD 292 rtnl_unlock(); 293 + request_module("act_%s", act_name); 294 rtnl_lock(); 295 296 a_o = tc_lookup_action_n(act_name);
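act_api.c now invokes an action as a->ops->act(skb, a, res) instead of passing &skb, and requests modules as "act_%s" to match the renamed act_*.o objects in the Makefile above; the individual actions below (act_gact, act_mirred, act_pedit, act_police, act_simple) drop their struct sk_buff **pskb indirection accordingly, while act_ipt keeps a local double pointer only for the iptables target call. A sketch of the signature change, with stub types standing in for the kernel's sk_buff, tc_action and tcf_result:

/* Illustrative only: stub types, not kernel definitions. */
#include <stdio.h>

struct sk_buff    { int len; };
struct tc_action  { const char *kind; };
struct tcf_result { int classid; };

/* old style: double pointer, so an action could replace the skb */
typedef int (*act_fn_old)(struct sk_buff **pskb, struct tc_action *a,
			  struct tcf_result *res);
/* new style: plain pointer, matching what act_api.c now passes */
typedef int (*act_fn_new)(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_result *res);

static int demo_act(struct sk_buff *skb, struct tc_action *a,
		    struct tcf_result *res)
{
	(void)res;
	printf("%s acting on skb of len %d\n", a->kind, skb->len);
	return 0;			/* e.g. TC_ACT_OK in the kernel */
}

int main(void)
{
	struct sk_buff skb = { .len = 128 };
	struct tc_action a = { .kind = "demo" };
	struct tcf_result res = { 0 };
	act_fn_new fn = demo_act;	/* the action only sees the skb by value */

	return fn(&skb, &a, &res);
}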
+1 -2
net/sched/gact.c net/sched/act_gact.c
··· 135 } 136 137 static int 138 - tcf_gact(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 139 { 140 struct tcf_gact *p = PRIV(a, gact); 141 - struct sk_buff *skb = *pskb; 142 int action = TC_ACT_SHOT; 143 144 spin_lock(&p->lock);
··· 135 } 136 137 static int 138 + tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 139 { 140 struct tcf_gact *p = PRIV(a, gact); 141 int action = TC_ACT_SHOT; 142 143 spin_lock(&p->lock);
+4 -2
net/sched/ipt.c net/sched/act_ipt.c
··· 201 } 202 203 static int 204 - tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 205 { 206 int ret = 0, result = 0; 207 struct tcf_ipt *p = PRIV(a, ipt); 208 - struct sk_buff *skb = *pskb; 209 210 if (skb_cloned(skb)) { 211 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) ··· 221 worry later - danger - this API seems to have changed 222 from earlier kernels */ 223 224 ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, 225 p->hook, p->t->data, NULL); 226 switch (ret) {
··· 201 } 202 203 static int 204 + tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 205 { 206 int ret = 0, result = 0; 207 struct tcf_ipt *p = PRIV(a, ipt); 208 209 if (skb_cloned(skb)) { 210 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) ··· 222 worry later - danger - this API seems to have changed 223 from earlier kernels */ 224 225 + /* iptables targets take a double skb pointer in case the skb 226 + * needs to be replaced. We don't own the skb, so this must not 227 + * happen. The pskb_expand_head above should make sure of this */ 228 ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, 229 p->hook, p->t->data, NULL); 230 switch (ret) {
+1 -2
net/sched/mirred.c net/sched/act_mirred.c
··· 158 } 159 160 static int 161 - tcf_mirred(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 162 { 163 struct tcf_mirred *p = PRIV(a, mirred); 164 struct net_device *dev; 165 struct sk_buff *skb2 = NULL; 166 - struct sk_buff *skb = *pskb; 167 u32 at = G_TC_AT(skb->tc_verd); 168 169 spin_lock(&p->lock);
··· 158 } 159 160 static int 161 + tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 162 { 163 struct tcf_mirred *p = PRIV(a, mirred); 164 struct net_device *dev; 165 struct sk_buff *skb2 = NULL; 166 u32 at = G_TC_AT(skb->tc_verd); 167 168 spin_lock(&p->lock);
+3 -2
net/sched/pedit.c net/sched/act_pedit.c
··· 130 } 131 132 static int 133 - tcf_pedit(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 134 { 135 struct tcf_pedit *p = PRIV(a, pedit); 136 - struct sk_buff *skb = *pskb; 137 int i, munged = 0; 138 u8 *pptr; 139 ··· 245 t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 246 t.expires = jiffies_to_clock_t(p->tm.expires); 247 RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); 248 return skb->len; 249 250 rtattr_failure: 251 skb_trim(skb, b - skb->data); 252 return -1; 253 } 254
··· 130 } 131 132 static int 133 + tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 134 { 135 struct tcf_pedit *p = PRIV(a, pedit); 136 int i, munged = 0; 137 u8 *pptr; 138 ··· 246 t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 247 t.expires = jiffies_to_clock_t(p->tm.expires); 248 RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); 249 + kfree(opt); 250 return skb->len; 251 252 rtattr_failure: 253 skb_trim(skb, b - skb->data); 254 + kfree(opt); 255 return -1; 256 } 257
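Besides the signature change, the act_pedit dump path now frees the temporary opt buffer on both the success return and the rtattr_failure exit, closing a small leak. The stand-in function below (dump_keys is a hypothetical name, not a kernel symbol) shows the same fix expressed with a single cleanup label:

/* Illustrative only: a temporary buffer must be freed on every exit path. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int dump_keys(char *out, size_t outlen, const int *keys, int nkeys)
{
	size_t need = (size_t)nkeys * 12;
	char *opt = malloc(need);	/* temporary copy, like opt in tcf_pedit_dump */
	size_t used = 0;
	int ret = -1;

	if (!opt)
		return -1;

	for (int i = 0; i < nkeys; i++)
		used += (size_t)snprintf(opt + used, need - used, "%d ", keys[i]);

	if (used >= outlen)		/* would overflow: the "rtattr_failure" case */
		goto out;

	memcpy(out, opt, used + 1);
	ret = (int)used;
out:
	free(opt);			/* freed on both paths - the bug was skipping this */
	return ret;
}

int main(void)
{
	int keys[3] = { 1, 2, 3 };
	char buf[64];

	if (dump_keys(buf, sizeof(buf), keys, 3) > 0)
		printf("%s\n", buf);
	return 0;
}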
+4 -13
net/sched/police.c net/sched/act_police.c
··· 284 return 0; 285 } 286 287 - static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a, 288 struct tcf_result *res) 289 { 290 psched_time_t now; 291 - struct sk_buff *skb = *pskb; 292 struct tcf_police *p = PRIV(a); 293 long toks; 294 long ptoks = 0; ··· 407 module_init(police_init_module); 408 module_exit(police_cleanup_module); 409 410 - #endif 411 412 struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) 413 { ··· 544 spin_unlock(&p->lock); 545 return p->action; 546 } 547 548 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) 549 { ··· 601 return -1; 602 } 603 604 - 605 - EXPORT_SYMBOL(tcf_police); 606 - EXPORT_SYMBOL(tcf_police_destroy); 607 - EXPORT_SYMBOL(tcf_police_dump); 608 - EXPORT_SYMBOL(tcf_police_dump_stats); 609 - EXPORT_SYMBOL(tcf_police_hash); 610 - EXPORT_SYMBOL(tcf_police_ht); 611 - EXPORT_SYMBOL(tcf_police_locate); 612 - EXPORT_SYMBOL(tcf_police_lookup); 613 - EXPORT_SYMBOL(tcf_police_new_index);
··· 284 return 0; 285 } 286 287 + static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, 288 struct tcf_result *res) 289 { 290 psched_time_t now; 291 struct tcf_police *p = PRIV(a); 292 long toks; 293 long ptoks = 0; ··· 408 module_init(police_init_module); 409 module_exit(police_cleanup_module); 410 411 + #else /* CONFIG_NET_CLS_ACT */ 412 413 struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) 414 { ··· 545 spin_unlock(&p->lock); 546 return p->action; 547 } 548 + EXPORT_SYMBOL(tcf_police); 549 550 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) 551 { ··· 601 return -1; 602 } 603 604 + #endif /* CONFIG_NET_CLS_ACT */
+2 -2
net/sched/sch_cbq.c
··· 257 (cl = cbq_class_lookup(q, prio)) != NULL) 258 return cl; 259 260 - *qerr = NET_XMIT_DROP; 261 for (;;) { 262 int result = 0; 263 defmap = head->defaults; ··· 413 q->rx_class = cl; 414 #endif 415 if (cl == NULL) { 416 - if (ret == NET_XMIT_DROP) 417 sch->qstats.drops++; 418 kfree_skb(skb); 419 return ret;
··· 257 (cl = cbq_class_lookup(q, prio)) != NULL) 258 return cl; 259 260 + *qerr = NET_XMIT_BYPASS; 261 for (;;) { 262 int result = 0; 263 defmap = head->defaults; ··· 413 q->rx_class = cl; 414 #endif 415 if (cl == NULL) { 416 + if (ret == NET_XMIT_BYPASS) 417 sch->qstats.drops++; 418 kfree_skb(skb); 419 return ret;
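sch_cbq switches its classification-failure sentinel from NET_XMIT_DROP to NET_XMIT_BYPASS, so a "no class found / filter verdict" result is not reported with the same code as an ordinary queue drop; sch_hfsc, sch_htb and sch_prio below get the same treatment. A stand-alone sketch of the pattern, with stand-in constants and structs rather than the kernel definitions:

/* Illustrative only: the constants and structs are stand-ins. */
#include <stdio.h>

enum { XMIT_SUCCESS = 0, XMIT_DROP = 1, XMIT_BYPASS = 4 };	/* stand-in values */

struct pkt   { int prio; };
struct klass { int id; };

static struct klass classes[2] = { { 1 }, { 2 } };

static struct klass *classify(struct pkt *p, int *qerr)
{
	*qerr = XMIT_BYPASS;		/* dedicated error if no class matches */
	if (p->prio >= 0 && p->prio < 2)
		return &classes[p->prio];
	return NULL;
}

static int enqueue(struct pkt *p)
{
	int err;
	struct klass *cl = classify(p, &err);

	if (cl == NULL) {
		if (err == XMIT_BYPASS)	/* count it as a drop locally ... */
			printf("qstats.drops++\n");
		return err;		/* ... but report the distinct code upward */
	}
	printf("enqueued to class %d\n", cl->id);
	return XMIT_SUCCESS;
}

int main(void)
{
	struct pkt good = { 1 }, bad = { 7 };

	enqueue(&good);
	return enqueue(&bad) == XMIT_BYPASS ? 0 : 1;
}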
+6 -6
net/sched/sch_hfsc.c
··· 208 do { \ 209 struct timeval tv; \ 210 do_gettimeofday(&tv); \ 211 - (stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec; \ 212 } while (0) 213 #endif 214 ··· 502 u64 dx; 503 504 dx = ((u64)d * PSCHED_JIFFIE2US(HZ)); 505 - dx += 1000000 - 1; 506 - do_div(dx, 1000000); 507 return dx; 508 } 509 ··· 523 { 524 u64 d; 525 526 - d = dx * 1000000; 527 do_div(d, PSCHED_JIFFIE2US(HZ)); 528 return (u32)d; 529 } ··· 1227 if (cl->level == 0) 1228 return cl; 1229 1230 - *qerr = NET_XMIT_DROP; 1231 tcf = q->root.filter_list; 1232 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { 1233 #ifdef CONFIG_NET_CLS_ACT ··· 1643 1644 cl = hfsc_classify(skb, sch, &err); 1645 if (cl == NULL) { 1646 - if (err == NET_XMIT_DROP) 1647 sch->qstats.drops++; 1648 kfree_skb(skb); 1649 return err;
··· 208 do { \ 209 struct timeval tv; \ 210 do_gettimeofday(&tv); \ 211 + (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \ 212 } while (0) 213 #endif 214 ··· 502 u64 dx; 503 504 dx = ((u64)d * PSCHED_JIFFIE2US(HZ)); 505 + dx += USEC_PER_SEC - 1; 506 + do_div(dx, USEC_PER_SEC); 507 return dx; 508 } 509 ··· 523 { 524 u64 d; 525 526 + d = dx * USEC_PER_SEC; 527 do_div(d, PSCHED_JIFFIE2US(HZ)); 528 return (u32)d; 529 } ··· 1227 if (cl->level == 0) 1228 return cl; 1229 1230 + *qerr = NET_XMIT_BYPASS; 1231 tcf = q->root.filter_list; 1232 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { 1233 #ifdef CONFIG_NET_CLS_ACT ··· 1643 1644 cl = hfsc_classify(skb, sch, &err); 1645 if (cl == NULL) { 1646 + if (err == NET_XMIT_BYPASS) 1647 sch->qstats.drops++; 1648 kfree_skb(skb); 1649 return err;
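Alongside the NET_XMIT_BYPASS change, sch_hfsc replaces its bare 1000000 literals with USEC_PER_SEC while keeping the round-up-before-divide idiom in d2dx(). The sketch below (usec_to_sec_ceil is a hypothetical name) shows that idiom with plain 64-bit division standing in for the kernel's do_div(), which divides a u64 in place:

/* Illustrative only: add (divisor - 1) before dividing to round up. */
#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

/* microseconds -> whole seconds, rounded up */
static uint64_t usec_to_sec_ceil(uint64_t us)
{
	us += USEC_PER_SEC - 1;
	return us / USEC_PER_SEC;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)usec_to_sec_ceil(1));		/* 1 */
	printf("%llu\n", (unsigned long long)usec_to_sec_ceil(1000000));	/* 1 */
	printf("%llu\n", (unsigned long long)usec_to_sec_ceil(1000001));	/* 2 */
	return 0;
}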
+2 -2
net/sched/sch_htb.c
··· 321 if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) 322 return cl; 323 324 - *qerr = NET_XMIT_DROP; 325 tcf = q->filter_list; 326 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { 327 #ifdef CONFIG_NET_CLS_ACT ··· 724 } 725 #ifdef CONFIG_NET_CLS_ACT 726 } else if (!cl) { 727 - if (ret == NET_XMIT_DROP) 728 sch->qstats.drops++; 729 kfree_skb (skb); 730 return ret;
··· 321 if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0) 322 return cl; 323 324 + *qerr = NET_XMIT_BYPASS; 325 tcf = q->filter_list; 326 while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { 327 #ifdef CONFIG_NET_CLS_ACT ··· 724 } 725 #ifdef CONFIG_NET_CLS_ACT 726 } else if (!cl) { 727 + if (ret == NET_XMIT_BYPASS) 728 sch->qstats.drops++; 729 kfree_skb (skb); 730 return ret;
+4 -3
net/sched/sch_prio.c
··· 54 u32 band = skb->priority; 55 struct tcf_result res; 56 57 - *qerr = NET_XMIT_DROP; 58 if (TC_H_MAJ(skb->priority) != sch->handle) { 59 #ifdef CONFIG_NET_CLS_ACT 60 switch (tc_classify(skb, q->filter_list, &res)) { ··· 91 qdisc = prio_classify(skb, sch, &ret); 92 #ifdef CONFIG_NET_CLS_ACT 93 if (qdisc == NULL) { 94 - if (ret == NET_XMIT_DROP) 95 sch->qstats.drops++; 96 kfree_skb(skb); 97 return ret; ··· 119 qdisc = prio_classify(skb, sch, &ret); 120 #ifdef CONFIG_NET_CLS_ACT 121 if (qdisc == NULL) { 122 - if (ret == NET_XMIT_DROP) 123 sch->qstats.drops++; 124 kfree_skb(skb); 125 return ret;
··· 54 u32 band = skb->priority; 55 struct tcf_result res; 56 57 + *qerr = NET_XMIT_BYPASS; 58 if (TC_H_MAJ(skb->priority) != sch->handle) { 59 #ifdef CONFIG_NET_CLS_ACT 60 switch (tc_classify(skb, q->filter_list, &res)) { ··· 91 qdisc = prio_classify(skb, sch, &ret); 92 #ifdef CONFIG_NET_CLS_ACT 93 if (qdisc == NULL) { 94 + 95 + if (ret == NET_XMIT_BYPASS) 96 sch->qstats.drops++; 97 kfree_skb(skb); 98 return ret; ··· 118 qdisc = prio_classify(skb, sch, &ret); 119 #ifdef CONFIG_NET_CLS_ACT 120 if (qdisc == NULL) { 121 + if (ret == NET_XMIT_BYPASS) 122 sch->qstats.drops++; 123 kfree_skb(skb); 124 return ret;
+6 -6
net/sched/sch_teql.c
··· 274 275 static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 276 { 277 - struct teql_master *master = (void*)dev->priv; 278 struct Qdisc *start, *q; 279 int busy; 280 int nores; ··· 350 static int teql_master_open(struct net_device *dev) 351 { 352 struct Qdisc * q; 353 - struct teql_master *m = (void*)dev->priv; 354 int mtu = 0xFFFE; 355 unsigned flags = IFF_NOARP|IFF_MULTICAST; 356 ··· 397 398 static struct net_device_stats *teql_master_stats(struct net_device *dev) 399 { 400 - struct teql_master *m = (void*)dev->priv; 401 return &m->stats; 402 } 403 404 static int teql_master_mtu(struct net_device *dev, int new_mtu) 405 { 406 - struct teql_master *m = (void*)dev->priv; 407 struct Qdisc *q; 408 409 if (new_mtu < 68) ··· 423 424 static __init void teql_master_setup(struct net_device *dev) 425 { 426 - struct teql_master *master = dev->priv; 427 struct Qdisc_ops *ops = &master->qops; 428 429 master->dev = dev; ··· 476 break; 477 } 478 479 - master = dev->priv; 480 481 strlcpy(master->qops.id, dev->name, IFNAMSIZ); 482 err = register_qdisc(&master->qops);
··· 274 275 static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 276 { 277 + struct teql_master *master = netdev_priv(dev); 278 struct Qdisc *start, *q; 279 int busy; 280 int nores; ··· 350 static int teql_master_open(struct net_device *dev) 351 { 352 struct Qdisc * q; 353 + struct teql_master *m = netdev_priv(dev); 354 int mtu = 0xFFFE; 355 unsigned flags = IFF_NOARP|IFF_MULTICAST; 356 ··· 397 398 static struct net_device_stats *teql_master_stats(struct net_device *dev) 399 { 400 + struct teql_master *m = netdev_priv(dev); 401 return &m->stats; 402 } 403 404 static int teql_master_mtu(struct net_device *dev, int new_mtu) 405 { 406 + struct teql_master *m = netdev_priv(dev); 407 struct Qdisc *q; 408 409 if (new_mtu < 68) ··· 423 424 static __init void teql_master_setup(struct net_device *dev) 425 { 426 + struct teql_master *master = netdev_priv(dev); 427 struct Qdisc_ops *ops = &master->qops; 428 429 master->dev = dev; ··· 476 break; 477 } 478 479 + master = netdev_priv(dev); 480 481 strlcpy(master->qops.id, dev->name, IFNAMSIZ); 482 err = register_qdisc(&master->qops);
+1 -2
net/sched/simple.c net/sched/act_simple.c
··· 44 #include <net/pkt_act.h> 45 #include <net/act_generic.h> 46 47 - static int tcf_simp(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res) 48 { 49 - struct sk_buff *skb = *pskb; 50 struct tcf_defact *p = PRIV(a, defact); 51 52 spin_lock(&p->lock);
··· 44 #include <net/pkt_act.h> 45 #include <net/act_generic.h> 46 47 + static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 48 { 49 struct tcf_defact *p = PRIV(a, defact); 50 51 spin_lock(&p->lock);
+1 -2
net/sctp/sm_sideeffect.c
··· 1250 case SCTP_CMD_TIMER_START: 1251 timer = &asoc->timers[cmd->obj.to]; 1252 timeout = asoc->timeouts[cmd->obj.to]; 1253 - if (!timeout) 1254 - BUG(); 1255 1256 timer->expires = jiffies + timeout; 1257 sctp_association_hold(asoc);
··· 1250 case SCTP_CMD_TIMER_START: 1251 timer = &asoc->timers[cmd->obj.to]; 1252 timeout = asoc->timeouts[cmd->obj.to]; 1253 + BUG_ON(!timeout); 1254 1255 timer->expires = jiffies + timeout; 1256 sctp_association_hold(asoc);
+2 -3
net/sunrpc/cache.c
··· 575 if (rp->q.list.next == &cd->queue) { 576 spin_unlock(&queue_lock); 577 up(&queue_io_sem); 578 - if (rp->offset) 579 - BUG(); 580 return 0; 581 } 582 rq = container_of(rp->q.list.next, struct cache_request, q.list); 583 - if (rq->q.reader) BUG(); 584 if (rp->offset == 0) 585 rq->readers++; 586 spin_unlock(&queue_lock);
··· 575 if (rp->q.list.next == &cd->queue) { 576 spin_unlock(&queue_lock); 577 up(&queue_io_sem); 578 + BUG_ON(rp->offset); 579 return 0; 580 } 581 rq = container_of(rp->q.list.next, struct cache_request, q.list); 582 + BUG_ON(rq->q.reader); 583 if (rp->offset == 0) 584 rq->readers++; 585 spin_unlock(&queue_lock);
+1 -2
net/sunrpc/svc.c
··· 122 rqstp->rq_argused = 0; 123 rqstp->rq_resused = 0; 124 arghi = 0; 125 - if (pages > RPCSVC_MAXPAGES) 126 - BUG(); 127 while (pages) { 128 struct page *p = alloc_page(GFP_KERNEL); 129 if (!p)
··· 122 rqstp->rq_argused = 0; 123 rqstp->rq_resused = 0; 124 arghi = 0; 125 + BUG_ON(pages > RPCSVC_MAXPAGES); 126 while (pages) { 127 struct page *p = alloc_page(GFP_KERNEL); 128 if (!p)
+2 -4
net/xfrm/xfrm_algo.c
··· 540 start = end; 541 } 542 } 543 - if (len) 544 - BUG(); 545 } 546 EXPORT_SYMBOL_GPL(skb_icv_walk); 547 ··· 609 start = end; 610 } 611 } 612 - if (len) 613 - BUG(); 614 return elt; 615 } 616 EXPORT_SYMBOL_GPL(skb_to_sgvec);
··· 540 start = end; 541 } 542 } 543 + BUG_ON(len); 544 } 545 EXPORT_SYMBOL_GPL(skb_icv_walk); 546 ··· 610 start = end; 611 } 612 } 613 + BUG_ON(len); 614 return elt; 615 } 616 EXPORT_SYMBOL_GPL(skb_to_sgvec);
+2 -4
net/xfrm/xfrm_policy.c
··· 248 249 void __xfrm_policy_destroy(struct xfrm_policy *policy) 250 { 251 - if (!policy->dead) 252 - BUG(); 253 254 - if (policy->bundles) 255 - BUG(); 256 257 if (del_timer(&policy->timer)) 258 BUG();
··· 248 249 void __xfrm_policy_destroy(struct xfrm_policy *policy) 250 { 251 + BUG_ON(!policy->dead); 252 253 + BUG_ON(policy->bundles); 254 255 if (del_timer(&policy->timer)) 256 BUG();