Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sctp: implement report_ftsn for sctp_stream_interleave

report_ftsn is added as a member of sctp_stream_interleave, used to
skip tsn from tsnmap, remove old events from reasm or lobby queue,
and abort pd for data or idata, called for SCTP_CMD_REPORT_FWDTSN
cmd and asoc reset.

sctp_report_iftsn works for ifwdtsn, and sctp_report_fwdtsn works
for fwdtsn. Note that sctp_report_iftsn doesn't do asoc abort_pd,
as stream abort_pd will be done when handling ifwdtsn. But when
ftsn is equal to the max TSN seen, which means asoc reset, asoc abort_pd has
to be done.

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Marcelo R. Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Xin Long and committed by
David S. Miller
47b20a88 0fc2ea92

+52 -12
+1
include/net/sctp/stream_interleave.h
··· 51 51 /* (I-)FORWARD-TSN process */ 52 52 void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn); 53 53 bool (*validate_ftsn)(struct sctp_chunk *chunk); 54 + void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn); 54 55 }; 55 56 56 57 void sctp_stream_interleave_init(struct sctp_stream *stream);
+1 -8
net/sctp/sm_sideeffect.c
··· 1368 1368 break; 1369 1369 1370 1370 case SCTP_CMD_REPORT_FWDTSN: 1371 - /* Move the Cumulattive TSN Ack ahead. */ 1372 - sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); 1373 - 1374 - /* purge the fragmentation queue */ 1375 - sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32); 1376 - 1377 - /* Abort any in progress partial delivery. */ 1378 - sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); 1371 + asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32); 1379 1372 break; 1380 1373 1381 1374 case SCTP_CMD_PROCESS_FWDTSN:
+2 -4
net/sctp/stream.c
··· 754 754 * performed. 755 755 */ 756 756 max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); 757 - sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen); 758 - sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); 757 + asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen); 759 758 760 759 /* G1: Compute an appropriate value for the Receiver's Next TSN -- the 761 760 * TSN that the peer should use to send the next DATA chunk. The ··· 1023 1024 &asoc->peer.tsn_map); 1024 1025 LIST_HEAD(temp); 1025 1026 1026 - sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn); 1027 - sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); 1027 + asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn); 1028 1028 1029 1029 sctp_tsnmap_init(&asoc->peer.tsn_map, 1030 1030 SCTP_TSN_MAP_INITIAL,
+48
net/sctp/stream_interleave.c
··· 1193 1193 return true; 1194 1194 } 1195 1195 1196 + static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn) 1197 + { 1198 + /* Move the Cumulattive TSN Ack ahead. */ 1199 + sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn); 1200 + /* purge the fragmentation queue */ 1201 + sctp_ulpq_reasm_flushtsn(ulpq, ftsn); 1202 + /* Abort any in progress partial delivery. */ 1203 + sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC); 1204 + } 1205 + 1206 + static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn) 1207 + { 1208 + struct sk_buff *pos, *tmp; 1209 + 1210 + skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { 1211 + struct sctp_ulpevent *event = sctp_skb2event(pos); 1212 + __u32 tsn = event->tsn; 1213 + 1214 + if (TSN_lte(tsn, ftsn)) { 1215 + __skb_unlink(pos, &ulpq->reasm); 1216 + sctp_ulpevent_free(event); 1217 + } 1218 + } 1219 + 1220 + skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) { 1221 + struct sctp_ulpevent *event = sctp_skb2event(pos); 1222 + __u32 tsn = event->tsn; 1223 + 1224 + if (TSN_lte(tsn, ftsn)) { 1225 + __skb_unlink(pos, &ulpq->reasm_uo); 1226 + sctp_ulpevent_free(event); 1227 + } 1228 + } 1229 + } 1230 + 1231 + static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn) 1232 + { 1233 + /* Move the Cumulattive TSN Ack ahead. */ 1234 + sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn); 1235 + /* purge the fragmentation queue */ 1236 + sctp_intl_reasm_flushtsn(ulpq, ftsn); 1237 + /* abort only when it's for all data */ 1238 + if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map)) 1239 + sctp_intl_abort_pd(ulpq, GFP_ATOMIC); 1240 + }