Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/vxy/lksctp-dev

+190 -77
+1 -1
include/net/sctp/sm.h
···
                                         const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
                                         const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
                                    const struct sctp_chunk *,
                                    const size_t hint);
+1
include/net/sctp/structs.h
···
                           struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
                                  const struct sctp_association *,
                                  struct sock *);
+1
include/net/sctp/ulpqueue.h
···
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 #endif /* __sctp_ulpqueue_h__ */
+6 -1
net/sctp/associola.c
··· 727 break; 728 729 case SCTP_TRANSPORT_DOWN: 730 - transport->state = SCTP_INACTIVE; 731 spc_state = SCTP_ADDR_UNREACHABLE; 732 break; 733
··· 727 break; 728 729 case SCTP_TRANSPORT_DOWN: 730 + /* if the transort was never confirmed, do not transition it 731 + * to inactive state. 732 + */ 733 + if (transport->state != SCTP_UNCONFIRMED) 734 + transport->state = SCTP_INACTIVE; 735 + 736 spc_state = SCTP_ADDR_UNREACHABLE; 737 break; 738
+7
net/sctp/outqueue.c
···
                 */
                if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
                    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+                       /* If this chunk was sent less than 1 rto ago, do not
+                        * retransmit this chunk, but give the peer time
+                        * to acknowledge it.
+                        */
+                       if ((jiffies - chunk->sent_at) < transport->rto)
+                               continue;
+
                        /* RFC 2960 6.2.1 Processing a Received SACK
                         *
                         * C) Any time a DATA chunk is marked for
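Side note on the check added above: the change holds back a chunk whose last transmission is less than one RTO old, giving the peer time to acknowledge it before a SACK-driven retransmission fires. The standalone sketch below mirrors that elapsed-time test with made-up names and millisecond timestamps in place of the kernel's jiffies-based chunk->sent_at and transport->rto; it is an illustration, not kernel code.

/* Minimal userspace sketch of the "don't retransmit within one RTO" test.
 * fake_chunk, sent_at_ms and rto_ms are hypothetical stand-ins.
 */
#include <stdio.h>

struct fake_chunk {
        unsigned long sent_at_ms;       /* when the chunk was last sent */
};

/* Return 1 if the chunk should be skipped on this retransmit pass. */
static int too_soon_to_retransmit(const struct fake_chunk *c,
                                  unsigned long now_ms, unsigned long rto_ms)
{
        return (now_ms - c->sent_at_ms) < rto_ms;
}

int main(void)
{
        struct fake_chunk c = { .sent_at_ms = 900 };
        unsigned long now = 1000, rto = 300;

        /* Sent 100 ms ago with a 300 ms RTO: skip it for now. */
        printf("skip retransmit: %d\n", too_soon_to_retransmit(&c, now, rto));
        return 0;
}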
+77 -35
net/sctp/sm_make_chunk.c
···
  * abort chunk.
  */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-                    const void *payload, size_t paylen)
+                    size_t paylen)
 {
        sctp_errhdr_t err;
        __u16 len;
···
        len = sizeof(sctp_errhdr_t) + paylen;
        err.length  = htons(len);
        chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-       sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
···
 
        /* Put the tsn back into network byte order.  */
        payload = htonl(tsn);
-       sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-                       sizeof(payload));
+       sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+       sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
        /* RFC 2960 6.4 Multi-homed SCTP Endpoints
         *
···
                goto err_copy;
        }
 
-       sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+       sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+       sctp_addto_chunk(retval, paylen, payload);
 
        if (paylen)
                kfree(payload);
···
        struct sctp_paramhdr phdr;
 
        retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-                                       + sizeof(sctp_chunkhdr_t));
+                                       + sizeof(sctp_paramhdr_t));
        if (!retval)
                goto end;
 
-       sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+       sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+                                       + sizeof(sctp_paramhdr_t));
 
        phdr.type = htons(chunk->chunk_hdr->type);
        phdr.length = chunk->chunk_hdr->length;
-       sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+       sctp_addto_chunk(retval, paylen, payload);
+       sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
        return retval;
···
        if (!retval)
                goto nodata;
 
-       sctp_init_cause(retval, cause_code, payload, paylen);
+       sctp_init_cause(retval, cause_code, paylen);
+       sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
        return retval;
···
        void *target;
        void *padding;
        int chunklen = ntohs(chunk->chunk_hdr->length);
-       int padlen = chunklen % 4;
+       int padlen = WORD_ROUND(chunklen) - chunklen;
 
        padding = skb_put(chunk->skb, padlen);
        target = skb_put(chunk->skb, len);
···
 
        /* Adjust the chunk length field.  */
        chunk->chunk_hdr->length = htons(chunklen + padlen + len);
+       chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+       return target;
+}
+
+/* Append bytes to the end of a parameter.  Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+       void *target;
+       int chunklen = ntohs(chunk->chunk_hdr->length);
+
+       target = skb_put(chunk->skb, len);
+
+       memcpy(target, data, len);
+
+       /* Adjust the chunk length field.  */
+       chunk->chunk_hdr->length = htons(chunklen + len);
        chunk->chunk_end = skb_tail_pointer(chunk->skb);
 
        return target;
···
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+       struct sctp_datamsg *msg;
+       struct sctp_chunk *lchunk;
+       struct sctp_stream *stream;
        __u16 ssn;
        __u16 sid;
 
        if (chunk->has_ssn)
                return;
 
-       /* This is the last possible instant to assign a SSN. */
-       if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-               ssn = 0;
-       } else {
-               sid = ntohs(chunk->subh.data_hdr->stream);
-               if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-                       ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-               else
-                       ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-       }
+       /* All fragments will be on the same stream */
+       sid = ntohs(chunk->subh.data_hdr->stream);
+       stream = &chunk->asoc->ssnmap->out;
 
-       chunk->subh.data_hdr->ssn = htons(ssn);
-       chunk->has_ssn = 1;
+       /* Now assign the sequence number to the entire message.
+        * All fragments must have the same stream sequence number.
+        */
+       msg = chunk->msg;
+       list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+               if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+                       ssn = 0;
+               } else {
+                       if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+                               ssn = sctp_ssn_next(stream, sid);
+                       else
+                               ssn = sctp_ssn_peek(stream, sid);
+               }
+
+               lchunk->subh.data_hdr->ssn = htons(ssn);
+               lchunk->has_ssn = 1;
+       }
 }
···
                __be32 n = htonl(usecs);
 
                sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-                               &n, sizeof(n));
+                               sizeof(n));
+               sctp_addto_chunk(*errp, sizeof(n), &n);
                *error = -SCTP_IERROR_STALE_COOKIE;
        } else
                *error = -SCTP_IERROR_NOMEM;
···
                report.num_missing = htonl(1);
                report.type = paramtype;
                sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-                               &report, sizeof(report));
+                               sizeof(report));
+               sctp_addto_chunk(*errp, sizeof(report), &report);
        }
 
        /* Stop processing this chunk. */
···
        *errp = sctp_make_op_error_space(asoc, chunk, 0);
 
        if (*errp)
-               sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+               sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
        /* Stop processing this chunk. */
        return 0;
···
        *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
        if (*errp) {
-               sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-                               sizeof(error));
-               sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+               sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+                               sizeof(error) + sizeof(sctp_paramhdr_t));
+               sctp_addto_chunk(*errp, sizeof(error), error);
+               sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
        }
 
        return 0;
···
        if (!*errp)
                *errp = sctp_make_op_error_space(asoc, chunk, len);
 
-       if (*errp)
-               sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-                               param.v, len);
+       if (*errp) {
+               sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+               sctp_addto_chunk(*errp, len, param.v);
+       }
 
        /* Stop processing this chunk. */
        return 0;
···
                *errp = sctp_make_op_error_space(asoc, chunk,
                                        ntohs(chunk->chunk_hdr->length));
 
-               if (*errp)
+               if (*errp) {
                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       param.v,
                                        WORD_ROUND(ntohs(param.p->length)));
+                       sctp_addto_chunk(*errp,
+                                       WORD_ROUND(ntohs(param.p->length)),
+                                       param.v);
+               }
 
                break;
        case SCTP_PARAM_ACTION_SKIP:
···
 
                if (*errp) {
                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                       param.v,
                                        WORD_ROUND(ntohs(param.p->length)));
+                       sctp_addto_chunk(*errp,
+                                       WORD_ROUND(ntohs(param.p->length)),
+                                       param.v);
                } else {
                        /* If there is no memory for generating the ERROR
                         * report as specified, an ABORT will be triggered
···
         * VIOLATION error.  We build the ERROR chunk here and let the normal
         * error handling code build and send the packet.
         */
-       if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+       if (param.v != (void*)chunk->chunk_end) {
                sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
                return 0;
        }
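Aside on the pad-length fix above: the old code computed the pad as chunklen % 4, which is the remainder rather than the number of bytes needed to reach the next 4-byte boundary, so unaligned chunks were padded by the wrong amount. The small standalone program below shows the difference; WORD_ROUND here is a local macro written to mirror the kernel's round-up-to-4 definition, used purely for illustration.

/* Standalone comparison of the old and new pad-length computations. */
#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
        int lengths[] = { 4, 5, 6, 7, 8 };

        for (unsigned i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
                int chunklen = lengths[i];
                int old_pad = chunklen % 4;                     /* buggy */
                int new_pad = WORD_ROUND(chunklen) - chunklen;  /* correct */

                printf("len %d: old pad %d, new pad %d\n",
                       chunklen, old_pad, new_pad);
        }
        return 0;
}

For a 5-byte chunk the old expression yields 1 where 3 pad bytes are required; the new expression yields 3, 2, 1, 0 as the length approaches the boundary.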
+6 -2
net/sctp/sm_sideeffect.c
··· 1013 break; 1014 1015 case SCTP_DISPOSITION_VIOLATION: 1016 - printk(KERN_ERR "sctp protocol violation state %d " 1017 - "chunkid %d\n", state, subtype.chunk); 1018 break; 1019 1020 case SCTP_DISPOSITION_NOT_IMPL: ··· 1130 case SCTP_CMD_REPORT_FWDTSN: 1131 /* Move the Cumulattive TSN Ack ahead. */ 1132 sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); 1133 1134 /* Abort any in progress partial delivery. */ 1135 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
··· 1013 break; 1014 1015 case SCTP_DISPOSITION_VIOLATION: 1016 + if (net_ratelimit()) 1017 + printk(KERN_ERR "sctp protocol violation state %d " 1018 + "chunkid %d\n", state, subtype.chunk); 1019 break; 1020 1021 case SCTP_DISPOSITION_NOT_IMPL: ··· 1129 case SCTP_CMD_REPORT_FWDTSN: 1130 /* Move the Cumulattive TSN Ack ahead. */ 1131 sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); 1132 + 1133 + /* purge the fragmentation queue */ 1134 + sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32); 1135 1136 /* Abort any in progress partial delivery. */ 1137 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+26 -25
net/sctp/sm_statefuns.c
···
        struct sctp_chunk *err_chunk;
        struct sctp_packet *packet;
        sctp_unrecognized_param_t *unk_param;
-       struct sock *sk;
        int len;
 
        /* 6.10 Bundling
···
         * control endpoint, respond with an ABORT.
         */
        if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
-       sk = ep->base.sk;
-       /* If the endpoint is not listening or if the number of associations
-        * on the TCP-style socket exceed the max backlog, respond with an
-        * ABORT.
-        */
-       if (!sctp_sstate(sk, LISTENING) ||
-           (sctp_style(sk, TCP) &&
-            sk_acceptq_is_full(sk)))
                return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
···
        struct sctp_ulpevent *ev, *ai_ev = NULL;
        int error = 0;
        struct sctp_chunk *err_chk_p;
+       struct sock *sk;
 
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
···
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
                return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+
+       /* If the endpoint is not listening or if the number of associations
+        * on the TCP-style socket exceed the max backlog, respond with an
+        * ABORT.
+        */
+       sk = ep->base.sk;
+       if (!sctp_sstate(sk, LISTENING) ||
+           (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
        /* "Decode" the chunk.  We have no optional parameters so we
         * are in good shape.
···
        /* This should never happen, but lets log it if so.  */
        if (unlikely(!link)) {
                if (from_addr.sa.sa_family == AF_INET6) {
-                       printk(KERN_WARNING
-                              "%s association %p could not find address "
-                              NIP6_FMT "\n",
-                              __FUNCTION__,
-                              asoc,
-                              NIP6(from_addr.v6.sin6_addr));
+                       if (net_ratelimit())
+                               printk(KERN_WARNING
+                                      "%s association %p could not find address "
+                                      NIP6_FMT "\n",
+                                      __FUNCTION__,
+                                      asoc,
+                                      NIP6(from_addr.v6.sin6_addr));
                } else {
-                       printk(KERN_WARNING
-                              "%s association %p could not find address "
-                              NIPQUAD_FMT "\n",
-                              __FUNCTION__,
-                              asoc,
-                              NIPQUAD(from_addr.v4.sin_addr.s_addr));
+                       if (net_ratelimit())
+                               printk(KERN_WARNING
+                                      "%s association %p could not find address "
+                                      NIPQUAD_FMT "\n",
+                                      __FUNCTION__,
+                                      asoc,
+                                      NIPQUAD(from_addr.v4.sin_addr.s_addr));
                }
                return SCTP_DISPOSITION_DISCARD;
        }
···
        abort = sctp_make_abort(asoc, asconf_ack,
                                sizeof(sctp_errhdr_t));
        if (abort) {
-               sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+               sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                SCTP_CHUNK(abort));
        }
···
        abort = sctp_make_abort(asoc, asconf_ack,
                                sizeof(sctp_errhdr_t));
        if (abort) {
-               sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+               sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                SCTP_CHUNK(abort));
        }
+3
net/sctp/socket.c
···
         * The function sctp_get_port_local() does duplicate address
         * detection.
         */
+       addr->v4.sin_port = htons(snum);
        if ((ret = sctp_get_port_local(sk, addr))) {
                if (ret == (long) sk) {
                        /* This endpoint has a conflicting address. */
···
 
                sctp_unhash_endpoint(ep);
                sk->sk_state = SCTP_SS_CLOSED;
+               return 0;
        }
 
        /* Return if we are already listening. */
···
 
                sctp_unhash_endpoint(ep);
                sk->sk_state = SCTP_SS_CLOSED;
+               return 0;
        }
 
        if (sctp_sstate(sk, LISTENING))
+62 -13
net/sctp/ulpqueue.c
···
        return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+       struct sk_buff *pos, *tmp;
+       struct sctp_ulpevent *event;
+       __u32 tsn;
+
+       if (skb_queue_empty(&ulpq->reasm))
+               return;
+
+       skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+               event = sctp_skb2event(pos);
+               tsn = event->tsn;
+
+               /* Since the entire message must be abandoned by the
+                * sender (item A3 in Section 3.5, RFC 3758), we can
+                * free all fragments on the list that are less than
+                * or equal to ctsn_point
+                */
+               if (TSN_lte(tsn, fwd_tsn)) {
+                       __skb_unlink(pos, &ulpq->reasm);
+                       sctp_ulpevent_free(event);
+               } else
+                       break;
+       }
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */
···
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
···
                csid = cevent->stream;
                cssn = cevent->ssn;
 
-               if (cssn != sctp_ssn_peek(in, csid))
+               /* Have we gone too far?  */
+               if (csid > sid)
                        break;
 
-               /* Found it, so mark in the ssnmap. */
-               sctp_ssn_next(in, csid);
+               /* Have we not gone far enough?  */
+               if (csid < sid)
+                       continue;
+
+               /* see if this ssn has been marked by skipping */
+               if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+                       break;
 
                __skb_unlink(pos, &ulpq->lobby);
-               if (!event) {
+               if (!event)
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);
-                       __skb_queue_tail(&temp, sctp_event2skb(event));
-               } else {
-                       /* Attach all gathered skbs to the event.  */
-                       __skb_queue_tail(&temp, pos);
-               }
+
+               /* Attach all gathered skbs to the event.  */
+               __skb_queue_tail(&temp, pos);
        }
 
        /* Send event to the ULP.  'event' is the sctp_ulpevent for
         * very first SKB on the 'temp' list.
         */
-       if (event)
+       if (event) {
+               /* see if we have more ordered data that we can deliver */
+               sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
+       }
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN.  This is used during the processing of
+ * Forward TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
        struct sctp_stream *in;
···
        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
-       sctp_ulpq_reap_ordered(ulpq);
+       sctp_ulpq_reap_ordered(ulpq, sid);
        return;
 }
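The flush added above stops at the first fragment whose TSN is newer than the Forward TSN point, relying on serial-number arithmetic so the comparison survives 32-bit wraparound. Below is a minimal standalone sketch of that comparison; the TSN_LT/TSN_LTE macros are local stand-ins written to mirror the semantics of the kernel's TSN_lte, not its actual definition.

/* Serial-number style "less than or equal" for 32-bit TSNs, so the
 * test still works when the TSN space wraps around.
 */
#include <stdio.h>
#include <stdint.h>

#define TSN_LT(a, b)  ((int32_t)((a) - (b)) < 0)
#define TSN_LTE(a, b) ((a) == (b) || TSN_LT(a, b))

int main(void)
{
        uint32_t fwd_tsn = 5;                          /* new cumulative TSN point */
        uint32_t frags[] = { 0xfffffffeu, 3, 5, 6 };   /* queued fragment TSNs */

        /* 0xfffffffe sits just before the wrap, so it is "older" than 5
         * and would be flushed; 6 is newer and would be kept.
         */
        for (unsigned i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
                printf("tsn %u: %s\n", frags[i],
                       TSN_LTE(frags[i], fwd_tsn) ? "flush" : "keep");
        return 0;
}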