Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/vxy/lksctp-dev

10 files changed, 190 insertions(+), 77 deletions(-)
include/net/sctp/sm.h | +1 -1
@@ -214,7 +214,7 @@
 				const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
 				const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
 				const struct sctp_chunk *,
 				const size_t hint);
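The prototype change drops the payload arguments from sctp_init_cause(); callers now write the error header first and append the cause-specific data themselves. A minimal sketch of the new calling pattern, assuming an already-built error chunk (the wrapper function below is hypothetical and only illustrates the calls changed in this merge):

	/* Hypothetical helper: add a "no user data" cause the new way.
	 * sctp_init_cause() now only reserves and fills the error header;
	 * the payload is appended with a separate sctp_addto_chunk() call.
	 */
	static void example_add_no_data_cause(struct sctp_chunk *err, __u32 tsn)
	{
		__be32 payload = htonl(tsn);

		/* old style (removed):
		 * sctp_init_cause(err, SCTP_ERROR_NO_DATA, &payload, sizeof(payload));
		 */
		sctp_init_cause(err, SCTP_ERROR_NO_DATA, sizeof(payload));
		sctp_addto_chunk(err, sizeof(payload), &payload);
	}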
include/net/sctp/structs.h | +1
@@ -726,6 +726,7 @@
 				struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
 				const struct sctp_association *,
 				struct sock *);
include/net/sctp/ulpqueue.h | +1
@@ -83,6 +83,7 @@
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 #endif /* __sctp_ulpqueue_h__ */
 
 
net/sctp/associola.c | +6 -1
@@ -727,7 +727,12 @@
 		break;
 
 	case SCTP_TRANSPORT_DOWN:
-		transport->state = SCTP_INACTIVE;
+		/* if the transort was never confirmed, do not transition it
+		 * to inactive state.
+		 */
+		if (transport->state != SCTP_UNCONFIRMED)
+			transport->state = SCTP_INACTIVE;
+
 		spc_state = SCTP_ADDR_UNREACHABLE;
 		break;
 
net/sctp/outqueue.c | +7
@@ -421,6 +421,13 @@
 		 */
 		if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
 		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+			/* If this chunk was sent less then 1 rto ago, do not
+			 * retransmit this chunk, but give the peer time
+			 * to acknowlege it.
+			 */
+			if ((jiffies - chunk->sent_at) < transport->rto)
+				continue;
+
 			/* RFC 2960 6.2.1 Processing a Received SACK
 			 *
 			 * C) Any time a DATA chunk is marked for
net/sctp/sm_make_chunk.c | +77 -35
@@ -110,7 +110,7 @@
  * abort chunk.
  */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-		     const void *payload, size_t paylen)
+		     size_t paylen)
 {
 	sctp_errhdr_t err;
 	__u16 len;
@@ -120,7 +120,6 @@
 	len = sizeof(sctp_errhdr_t) + paylen;
 	err.length = htons(len);
 	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-	sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
@@ -779,8 +780,8 @@
 
 	/* Put the tsn back into network byte order. */
 	payload = htonl(tsn);
-	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-			sizeof(payload));
+	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+	sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
 	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
 	 *
@@ -822,7 +823,8 @@
 			goto err_copy;
 	}
 
-	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 	if (paylen)
 		kfree(payload);
@@ -850,15 +850,17 @@
 	struct sctp_paramhdr phdr;
 
 	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-					+ sizeof(sctp_chunkhdr_t));
+					+ sizeof(sctp_paramhdr_t));
 	if (!retval)
 		goto end;
 
-	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+					+ sizeof(sctp_paramhdr_t));
 
 	phdr.type = htons(chunk->chunk_hdr->type);
 	phdr.length = chunk->chunk_hdr->length;
-	sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+	sctp_addto_chunk(retval, paylen, payload);
+	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
 	return retval;
@@ -957,7 +955,8 @@
 	if (!retval)
 		goto nodata;
 
-	sctp_init_cause(retval, cause_code, payload, paylen);
+	sctp_init_cause(retval, cause_code, paylen);
+	sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
 	return retval;
@@ -1131,7 +1128,7 @@
 	void *target;
 	void *padding;
 	int chunklen = ntohs(chunk->chunk_hdr->length);
-	int padlen = chunklen % 4;
+	int padlen = WORD_ROUND(chunklen) - chunklen;
 
 	padding = skb_put(chunk->skb, padlen);
 	target = skb_put(chunk->skb, len);
@@ -1141,6 +1138,25 @@
 
 	/* Adjust the chunk length field. */
 	chunk->chunk_hdr->length = htons(chunklen + padlen + len);
+	chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+	return target;
+}
+
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+	void *target;
+	int chunklen = ntohs(chunk->chunk_hdr->length);
+
+	target = skb_put(chunk->skb, len);
+
+	memcpy(target, data, len);
+
+	/* Adjust the chunk length field. */
+	chunk->chunk_hdr->length = htons(chunklen + len);
 	chunk->chunk_end = skb_tail_pointer(chunk->skb);
 
 	return target;
@@ -1196,25 +1174,36 @@
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+	struct sctp_datamsg *msg;
+	struct sctp_chunk *lchunk;
+	struct sctp_stream *stream;
 	__u16 ssn;
 	__u16 sid;
 
 	if (chunk->has_ssn)
 		return;
 
-	/* This is the last possible instant to assign a SSN. */
-	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-		ssn = 0;
-	} else {
-		sid = ntohs(chunk->subh.data_hdr->stream);
-		if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-			ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-		else
-			ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-	}
+	/* All fragments will be on the same stream */
+	sid = ntohs(chunk->subh.data_hdr->stream);
+	stream = &chunk->asoc->ssnmap->out;
 
-	chunk->subh.data_hdr->ssn = htons(ssn);
-	chunk->has_ssn = 1;
+	/* Now assign the sequence number to the entire message.
+	 * All fragments must have the same stream sequence number.
+	 */
+	msg = chunk->msg;
+	list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			ssn = 0;
+		} else {
+			if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+				ssn = sctp_ssn_next(stream, sid);
+			else
+				ssn = sctp_ssn_peek(stream, sid);
+		}
+
+		lchunk->subh.data_hdr->ssn = htons(ssn);
+		lchunk->has_ssn = 1;
+	}
 }
 
 /* Helper function to assign a TSN if needed. This assumes that both
@@ -1499,7 +1466,8 @@
 		__be32 n = htonl(usecs);
 
 		sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-				&n, sizeof(n));
+				sizeof(n));
+		sctp_addto_chunk(*errp, sizeof(n), &n);
 		*error = -SCTP_IERROR_STALE_COOKIE;
 	} else
 		*error = -SCTP_IERROR_NOMEM;
@@ -1590,7 +1556,8 @@
 		report.num_missing = htonl(1);
 		report.type = paramtype;
 		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-				&report, sizeof(report));
+				sizeof(report));
+		sctp_addto_chunk(*errp, sizeof(report), &report);
 	}
 
 	/* Stop processing this chunk. */
@@ -1609,7 +1574,7 @@
 		*errp = sctp_make_op_error_space(asoc, chunk, 0);
 
 	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
 	/* Stop processing this chunk. */
 	return 0;
@@ -1630,9 +1595,10 @@
 		*errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
 	if (*errp) {
-		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-				sizeof(error));
-		sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+				sizeof(error) + sizeof(sctp_paramhdr_t));
+		sctp_addto_chunk(*errp, sizeof(error), error);
+		sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
 	}
 
 	return 0;
@@ -1654,9 +1618,10 @@
 	if (!*errp)
 		*errp = sctp_make_op_error_space(asoc, chunk, len);
 
-	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-				param.v, len);
+	if (*errp) {
+		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+		sctp_addto_chunk(*errp, len, param.v);
+	}
 
 	/* Stop processing this chunk. */
 	return 0;
@@ -1709,10 +1672,13 @@
 		*errp = sctp_make_op_error_space(asoc, chunk,
 					ntohs(chunk->chunk_hdr->length));
 
-		if (*errp)
+		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
+		}
 
 		break;
 	case SCTP_PARAM_ACTION_SKIP:
@@ -1730,8 +1690,10 @@
 
 		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
 		} else {
 			/* If there is no memory for generating the ERROR
 			 * report as specified, an ABORT will be triggered
@@ -1833,7 +1791,7 @@
 	 * VIOLATION error. We build the ERROR chunk here and let the normal
 	 * error handling code build and send the packet.
 	 */
-	if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+	if (param.v != (void*)chunk->chunk_end) {
 		sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
 		return 0;
 	}
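The padlen change in sctp_addto_chunk() corrects how much padding is inserted before appending: `chunklen % 4` gives the number of bytes past the previous word boundary, not the bytes needed to reach the next one. A small self-contained sketch of the arithmetic (WORD_ROUND here is a stand-in for the kernel macro, assumed to round up to a multiple of 4):

	#include <stdio.h>

	/* Stand-in for the kernel's WORD_ROUND(): round up to a multiple of 4. */
	#define WORD_ROUND(s) (((s) + 3) & ~3)

	int main(void)
	{
		int chunklen;

		for (chunklen = 4; chunklen <= 8; chunklen++) {
			int old_pad = chunklen % 4;                    /* buggy */
			int new_pad = WORD_ROUND(chunklen) - chunklen; /* fixed */

			/* e.g. chunklen = 5 needs 3 bytes of padding to reach
			 * the next word boundary, but the old expression gave 1.
			 */
			printf("chunklen=%d old_pad=%d new_pad=%d\n",
			       chunklen, old_pad, new_pad);
		}
		return 0;
	}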
net/sctp/sm_sideeffect.c | +6 -2
@@ -1013,8 +1013,9 @@
 		break;
 
 	case SCTP_DISPOSITION_VIOLATION:
-		printk(KERN_ERR "sctp protocol violation state %d "
-		       "chunkid %d\n", state, subtype.chunk);
+		if (net_ratelimit())
+			printk(KERN_ERR "sctp protocol violation state %d "
+			       "chunkid %d\n", state, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1129,9 @@
 		case SCTP_CMD_REPORT_FWDTSN:
 			/* Move the Cumulattive TSN Ack ahead. */
 			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
+
+			/* purge the fragmentation queue */
+			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
 
 			/* Abort any in progress partial delivery. */
 			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
net/sctp/sm_statefuns.c | +26 -25
@@ -264,7 +264,6 @@
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
-	struct sock *sk;
 	int len;
 
 	/* 6.10 Bundling
@@ -282,16 +283,6 @@
 	 * control endpoint, respond with an ABORT.
 	 */
 	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
-	sk = ep->base.sk;
-	/* If the endpoint is not listening or if the number of associations
-	 * on the TCP-style socket exceed the max backlog, respond with an
-	 * ABORT.
-	 */
-	if (!sctp_sstate(sk, LISTENING) ||
-	    (sctp_style(sk, TCP) &&
-	     sk_acceptq_is_full(sk)))
 		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
@@ -579,6 +590,7 @@
 	struct sctp_ulpevent *ev, *ai_ev = NULL;
 	int error = 0;
 	struct sctp_chunk *err_chk_p;
+	struct sock *sk;
 
 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.
@@ -594,6 +604,15 @@
 	 */
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+
+	/* If the endpoint is not listening or if the number of associations
+	 * on the TCP-style socket exceed the max backlog, respond with an
+	 * ABORT.
+	 */
+	sk = ep->base.sk;
+	if (!sctp_sstate(sk, LISTENING) ||
+	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
 	/* "Decode" the chunk. We have no optional parameters so we
 	 * are in good shape.
@@ -1031,19 +1032,21 @@
 	/* This should never happen, but lets log it if so. */
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIP6_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIP6(from_addr.v6.sin6_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIP6_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIP6(from_addr.v6.sin6_addr));
 		} else {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIPQUAD_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIPQUAD(from_addr.v4.sin_addr.s_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIPQUAD_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIPQUAD(from_addr.v4.sin_addr.s_addr));
 		}
 		return SCTP_DISPOSITION_DISCARD;
 	}
@@ -3363,7 +3362,7 @@
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}
@@ -3393,7 +3392,7 @@
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}
net/sctp/socket.c | +3
@@ -353,6 +353,7 @@
 	 * The function sctp_get_port_local() does duplicate address
 	 * detection.
 	 */
+	addr->v4.sin_port = htons(snum);
 	if ((ret = sctp_get_port_local(sk, addr))) {
 		if (ret == (long) sk) {
 			/* This endpoint has a conflicting address. */
@@ -5203,6 +5202,7 @@
 
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}
 
 	/* Return if we are already listening. */
@@ -5251,6 +5249,7 @@
 
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}
 
 	if (sctp_sstate(sk, LISTENING))
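The two added `return 0` statements are in the listen paths that handle a backlog of zero, which SCTP uses to turn listening back off; without the early return the code fell through and re-entered the normal listen setup. A hedged user-space illustration of that behaviour (bind and error handling trimmed; the semantics are those of the kernel paths above, not a complete program for production use):

	#include <stdio.h>
	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef IPPROTO_SCTP
	#define IPPROTO_SCTP 132
	#endif

	int main(void)
	{
		/* TCP-style SCTP socket */
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
		if (fd < 0) {
			perror("socket");
			return 1;
		}

		/* Start listening (the kernel autobinds if no address is bound). */
		if (listen(fd, 5) < 0)
			perror("listen");

		/* Backlog 0 disables listening; with the fix above this path
		 * returns success instead of falling back into listen setup.
		 */
		if (listen(fd, 0) < 0)
			perror("listen(0)");

		close(fd);
		return 0;
	}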
net/sctp/ulpqueue.c | +62 -13
@@ -659,6 +659,46 @@
 	return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less then
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an an incoming chunk.
  */
@@ -834,7 +794,7 @@
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
@@ -853,31 +813,40 @@
 		csid = cevent->stream;
 		cssn = cevent->ssn;
 
-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far? */
+		if (csid > sid)
 			break;
 
-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough? */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;
 
 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on. */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event. */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event. */
+		__skb_queue_tail(&temp, pos);
 	}
 
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forwared TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;
@@ -904,7 +855,7 @@
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
 