Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-davem' of git://gitorious.org/linux-can/linux-can-next

+33 -57
net/can/gw.c
··· 444 444 return NOTIFY_DONE; 445 445 } 446 446 447 - static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj) 447 + static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, 448 + u32 pid, u32 seq, int flags) 448 449 { 449 450 struct cgw_frame_mod mb; 450 451 struct rtcanmsg *rtcan; 451 - struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0); 452 + struct nlmsghdr *nlh; 453 + 454 + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); 452 455 if (!nlh) 453 456 return -EMSGSIZE; 454 457 ··· 465 462 if (gwj->handled_frames) { 466 463 if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0) 467 464 goto cancel; 468 - else 469 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32)); 470 465 } 471 466 472 467 if (gwj->dropped_frames) { 473 468 if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0) 474 469 goto cancel; 475 - else 476 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32)); 477 470 } 478 471 479 472 /* check non default settings of attributes */ ··· 479 480 mb.modtype = gwj->mod.modtype.and; 480 481 if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) 481 482 goto cancel; 482 - else 483 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb)); 484 483 } 485 484 486 485 if (gwj->mod.modtype.or) { ··· 486 489 mb.modtype = gwj->mod.modtype.or; 487 490 if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) 488 491 goto cancel; 489 - else 490 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb)); 491 492 } 492 493 493 494 if (gwj->mod.modtype.xor) { ··· 493 498 mb.modtype = gwj->mod.modtype.xor; 494 499 if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) 495 500 goto cancel; 496 - else 497 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb)); 498 501 } 499 502 500 503 if (gwj->mod.modtype.set) { ··· 500 507 mb.modtype = gwj->mod.modtype.set; 501 508 if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) 502 509 goto cancel; 503 - else 504 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb)); 505 510 } 506 511 
507 512 if (gwj->mod.csumfunc.crc8) { 508 513 if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, 509 514 &gwj->mod.csum.crc8) < 0) 510 515 goto cancel; 511 - else 512 - nlh->nlmsg_len += NLA_HDRLEN + \ 513 - NLA_ALIGN(CGW_CS_CRC8_LEN); 514 516 } 515 517 516 518 if (gwj->mod.csumfunc.xor) { 517 519 if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, 518 520 &gwj->mod.csum.xor) < 0) 519 521 goto cancel; 520 - else 521 - nlh->nlmsg_len += NLA_HDRLEN + \ 522 - NLA_ALIGN(CGW_CS_XOR_LEN); 523 522 } 524 523 525 524 if (gwj->gwtype == CGW_TYPE_CAN_CAN) { ··· 520 535 if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter), 521 536 &gwj->ccgw.filter) < 0) 522 537 goto cancel; 523 - else 524 - nlh->nlmsg_len += NLA_HDRLEN + 525 - NLA_ALIGN(sizeof(struct can_filter)); 526 538 } 527 539 528 540 if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0) 529 541 goto cancel; 530 - else 531 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32)); 532 542 533 543 if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0) 534 544 goto cancel; 535 - else 536 - nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32)); 537 545 } 538 546 539 - return skb->len; 547 + return nlmsg_end(skb, nlh); 540 548 541 549 cancel: 542 550 nlmsg_cancel(skb, nlh); ··· 549 571 if (idx < s_idx) 550 572 goto cont; 551 573 552 - if (cgw_put_job(skb, gwj) < 0) 574 + if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid, 575 + cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) 553 576 break; 554 577 cont: 555 578 idx++; ··· 561 582 562 583 return skb->len; 563 584 } 585 + 586 + static const struct nla_policy cgw_policy[CGW_MAX+1] = { 587 + [CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) }, 588 + [CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) }, 589 + [CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) }, 590 + [CGW_MOD_SET] = { .len = sizeof(struct cgw_frame_mod) }, 591 + [CGW_CS_XOR] = { .len = sizeof(struct cgw_csum_xor) }, 592 + [CGW_CS_CRC8] = { .len = sizeof(struct cgw_csum_crc8) }, 593 + [CGW_SRC_IF] = { .type 
= NLA_U32 }, 594 + [CGW_DST_IF] = { .type = NLA_U32 }, 595 + [CGW_FILTER] = { .len = sizeof(struct can_filter) }, 596 + }; 564 597 565 598 /* check for common and gwtype specific attributes */ 566 599 static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, ··· 586 595 /* initialize modification & checksum data space */ 587 596 memset(mod, 0, sizeof(*mod)); 588 597 589 - err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL); 598 + err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, 599 + cgw_policy); 590 600 if (err < 0) 591 601 return err; 592 602 593 603 /* check for AND/OR/XOR/SET modifications */ 594 604 595 - if (tb[CGW_MOD_AND] && 596 - nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) { 605 + if (tb[CGW_MOD_AND]) { 597 606 nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); 598 607 599 608 canframecpy(&mod->modframe.and, &mb.cf); ··· 609 618 mod->modfunc[modidx++] = mod_and_data; 610 619 } 611 620 612 - if (tb[CGW_MOD_OR] && 613 - nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) { 621 + if (tb[CGW_MOD_OR]) { 614 622 nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); 615 623 616 624 canframecpy(&mod->modframe.or, &mb.cf); ··· 625 635 mod->modfunc[modidx++] = mod_or_data; 626 636 } 627 637 628 - if (tb[CGW_MOD_XOR] && 629 - nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) { 638 + if (tb[CGW_MOD_XOR]) { 630 639 nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); 631 640 632 641 canframecpy(&mod->modframe.xor, &mb.cf); ··· 641 652 mod->modfunc[modidx++] = mod_xor_data; 642 653 } 643 654 644 - if (tb[CGW_MOD_SET] && 645 - nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) { 655 + if (tb[CGW_MOD_SET]) { 646 656 nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); 647 657 648 658 canframecpy(&mod->modframe.set, &mb.cf); ··· 660 672 /* check for checksum operations after CAN frame modifications */ 661 673 if (modidx) { 662 674 663 - if (tb[CGW_CS_CRC8] && 664 - nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) { 665 - 666 - struct cgw_csum_crc8 *c = (struct 
cgw_csum_crc8 *)\ 667 - nla_data(tb[CGW_CS_CRC8]); 675 + if (tb[CGW_CS_CRC8]) { 676 + struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]); 668 677 669 678 err = cgw_chk_csum_parms(c->from_idx, c->to_idx, 670 679 c->result_idx); ··· 684 699 mod->csumfunc.crc8 = cgw_csum_crc8_neg; 685 700 } 686 701 687 - if (tb[CGW_CS_XOR] && 688 - nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) { 689 - 690 - struct cgw_csum_xor *c = (struct cgw_csum_xor *)\ 691 - nla_data(tb[CGW_CS_XOR]); 702 + if (tb[CGW_CS_XOR]) { 703 + struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]); 692 704 693 705 err = cgw_chk_csum_parms(c->from_idx, c->to_idx, 694 706 c->result_idx); ··· 717 735 memset(ccgw, 0, sizeof(*ccgw)); 718 736 719 737 /* check for can_filter in attributes */ 720 - if (tb[CGW_FILTER] && 721 - nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter)) 738 + if (tb[CGW_FILTER]) 722 739 nla_memcpy(&ccgw->filter, tb[CGW_FILTER], 723 740 sizeof(struct can_filter)); 724 741 ··· 727 746 if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF]) 728 747 return err; 729 748 730 - if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32)) 731 - nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF], 732 - sizeof(u32)); 733 - 734 - if (nla_len(tb[CGW_DST_IF]) == sizeof(u32)) 735 - nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF], 736 - sizeof(u32)); 749 + ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]); 750 + ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]); 737 751 738 752 /* both indices set to 0 for flushing all routing entries */ 739 753 if (!ccgw->src_idx && !ccgw->dst_idx)