Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rpc: handle rotated gss data for Windows interoperability

The data in Kerberos gss tokens can be rotated. But we were lazy and
rejected any nonzero rotation value. It wasn't necessary for the
implementations we were testing against at the time.

But it appears that Windows does use a nonzero value here.

So, implement rotation to bring ourselves into compliance with the spec
and to interoperate with Windows.

Signed-off-by: J. Bruce Fields <bfields@redhat.com>

+45 -16
+45 -16
net/sunrpc/auth_gss/gss_krb5_wrap.c
··· 381 381 } 382 382 383 383 /* 384 - * We cannot currently handle tokens with rotated data. We need a 385 - * generalized routine to rotate the data in place. It is anticipated 386 - * that we won't encounter rotated data in the general case. 384 + * We can shift data by up to LOCAL_BUF_LEN bytes in a pass. If we need 385 + * to do more than that, we shift repeatedly. Kevin Coffman reports 386 + * seeing 28 bytes as the value used by Microsoft clients and servers 387 + * with AES, so this constant is chosen to allow handling 28 in one pass 388 + * without using too much stack space. 389 + * 390 + * If that proves to be a problem perhaps we could use a more clever 391 + * algorithm. 387 392 */ 388 - static u32 389 - rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc) 393 + #define LOCAL_BUF_LEN 32u 394 + 395 + static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift) 390 396 { 391 - unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN); 397 + char head[LOCAL_BUF_LEN]; 398 + char tmp[LOCAL_BUF_LEN]; 399 + unsigned int this_len, i; 392 400 393 - if (realrrc == 0) 394 - return 0; 401 + BUG_ON(shift > LOCAL_BUF_LEN); 395 402 396 - dprintk("%s: cannot process token with rotated data: " 397 - "rrc %u, realrrc %u\n", __func__, rrc, realrrc); 398 - return 1; 403 + read_bytes_from_xdr_buf(buf, 0, head, shift); 404 + for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) { 405 + this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift)); 406 + read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len); 407 + write_bytes_to_xdr_buf(buf, i, tmp, this_len); 408 + } 409 + write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift); 410 + } 411 + 412 + static void _rotate_left(struct xdr_buf *buf, unsigned int shift) 413 + { 414 + int shifted = 0; 415 + int this_shift; 416 + 417 + shift %= buf->len; 418 + while (shifted < shift) { 419 + this_shift = min(shift - shifted, LOCAL_BUF_LEN); 420 + rotate_buf_a_little(buf, this_shift); 421 + 
shifted += this_shift; 422 + } 423 + } 424 + 425 + static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift) 426 + { 427 + struct xdr_buf subbuf; 428 + 429 + xdr_buf_subsegment(buf, &subbuf, base, buf->len - base); 430 + _rotate_left(&subbuf, shift); 399 431 } 400 432 401 433 static u32 ··· 527 495 528 496 seqnum = be64_to_cpup((__be64 *)(ptr + 8)); 529 497 530 - if (rrc != 0) { 531 - err = rotate_left(kctx, offset, buf, rrc); 532 - if (err) 533 - return GSS_S_FAILURE; 534 - } 498 + if (rrc != 0) 499 + rotate_left(offset + 16, buf, rrc); 535 500 536 501 err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, 537 502 &headskip, &tailskip);