net/dccp/output.c, from the Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git) at v2.6.19-rc2:
/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);
        WARN_ON(sk->sk_send_head);
        sk->sk_send_head = skb;
}

/*
 * All SKB's seen here are completely headerless.  It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const u32 dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                          dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                dccp_inc_seqno(&dp->dccps_gss);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        /* fall through */
                case DCCP_PKT_DATAACK:
                        break;

                case DCCP_PKT_REQUEST:
                        set_ack = 0;
                        /* fall through */

                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_seq;
                        /* fall through */
                default:
                        /*
                         * Only data packets should come through with skb->sk
                         * set.
                         */
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                dcb->dccpd_seq = dp->dccps_gss;

                if (dccp_insert_options(sk, skb)) {
                        kfree_skb(skb);
                        return -EPROTO;
                }

                skb->h.raw = skb_push(skb, dccp_header_size);
                dh = dccp_hdr(skb);

                /* Build DCCP header and checksum it. */
                memset(dh, 0, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, skb->len, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                err = icsk->icsk_af_ops->queue_xmit(skb, 0);
                if (err <= 0)
                        return err;

                /* NET_XMIT_CN is special.  It does not guarantee that this
                 * packet is lost.  It tells us that the device is about to
                 * start dropping packets, or already drops some packets of
                 * the same priority, and asks us to send less aggressively.
                 */
                return err == NET_XMIT_CN ? 0 : err;
        }
        return -ENOBUFS;
}
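/*
 * Worked example for the dccp_header_size arithmetic above, assuming
 * the RFC 4340 wire layouts that these structs mirror: struct dccp_hdr
 * is the 12-byte generic header, struct dccp_hdr_ext the extra 4 bytes
 * holding the low bits of a 48-bit sequence number (hence dccph_x = 1),
 * and dccp_packet_hdr_len() the per-type remainder.  For a DCCP-Request,
 * whose type-specific part is the 4-byte Service Code, skb_push() thus
 * makes room for 12 + 4 + 4 = 20 bytes ahead of any options, and
 * dccph_doff records header plus options in 32-bit words.
 */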
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
                       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

        /* Now subtract optional transport overhead */
        mss_now -= icsk->icsk_ext_hdr_len;

        /*
         * FIXME: this should come from the CCID infrastructure, where, say,
         * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
         * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
         * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
         * make it a multiple of 4
         */

        mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

        /* And store cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = mss_now;

        return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);
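/*
 * Example of the MSS arithmetic above, assuming IPv4 with no IP
 * options and a path MTU of 1500: net_header_len is the 20-byte IP
 * header, the two fixed DCCP header structs account for another
 * 12 + 4 = 16 bytes, and ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4
 * truncates the 42-byte rough option estimate to 40, the nearest
 * lower multiple of 4.  With icsk_ext_hdr_len == 0 this leaves
 * 1500 - 20 - 16 - 40 = 1424 bytes of payload per packet.
 */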
void dccp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, 2, POLL_OUT);

        read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   current skb to pass on for waiting
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
                              long *timeo)
{
        struct dccp_sock *dp = dccp_sk(sk);
        DEFINE_WAIT(wait);
        long delay;
        int rc;

        while (1) {
                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

                if (sk->sk_err)
                        goto do_error;
                if (!*timeo)
                        goto do_nonblock;
                if (signal_pending(current))
                        goto do_interrupted;

                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
                                            skb->len);
                if (rc <= 0)
                        break;
                delay = msecs_to_jiffies(rc);
                if (delay > *timeo || delay < 0)
                        goto do_nonblock;

                sk->sk_write_pending++;
                release_sock(sk);
                *timeo -= schedule_timeout(delay);
                lock_sock(sk);
                sk->sk_write_pending--;
        }
out:
        finish_wait(sk->sk_sleep, &wait);
        return rc;

do_error:
        rc = -EPIPE;
        goto out;
do_nonblock:
        rc = -EAGAIN;
        goto out;
do_interrupted:
        rc = sock_intr_errno(*timeo);
        goto out;
}

static void dccp_write_xmit_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;
        struct dccp_sock *dp = dccp_sk(sk);

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
        else
                dccp_write_xmit(sk, 0);
        bh_unlock_sock(sk);
        sock_put(sk);
}

void dccp_write_xmit(struct sock *sk, int block)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        long timeo = 30000;     /* If a packet takes this long to go
                                   out, we have other issues */

        while ((skb = skb_peek(&sk->sk_write_queue))) {
                int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
                                                 skb->len);

                if (err > 0) {
                        if (!block) {
                                sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                               msecs_to_jiffies(err) + jiffies);
                                break;
                        } else
                                err = dccp_wait_for_ccid(sk, skb, &timeo);
                        if (err) {
                                printk(KERN_CRIT "%s: err at dccp_wait_for_ccid"
                                                 " %d\n", __FUNCTION__, err);
                                dump_stack();
                        }
                }

                skb_dequeue(&sk->sk_write_queue);
                if (err == 0) {
                        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                        const int len = skb->len;

                        if (sk->sk_state == DCCP_PARTOPEN) {
                                /* See 8.1.5.  Handshake Completion */
                                inet_csk_schedule_ack(sk);
                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  inet_csk(sk)->icsk_rto,
                                                  DCCP_RTO_MAX);
                                dcb->dccpd_type = DCCP_PKT_DATAACK;
                        } else if (dccp_ack_pending(sk))
                                dcb->dccpd_type = DCCP_PKT_DATAACK;
                        else
                                dcb->dccpd_type = DCCP_PKT_DATA;

                        err = dccp_transmit_skb(sk, skb);
                        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
                        if (err) {
                                printk(KERN_CRIT "%s: err from "
                                                 "ccid_hc_tx_packet_sent %d\n",
                                                 __FUNCTION__, err);
                                dump_stack();
                        }
                } else
                        kfree_skb(skb);
        }
}
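/*
 * Note on the CCID convention relied upon above: as used by both
 * dccp_wait_for_ccid() and dccp_write_xmit(), ccid_hc_tx_send_packet()
 * returns 0 when the CCID allows the packet out right away, a positive
 * delay in milliseconds when a rate-based CCID (e.g. CCID3/TFRC) wants
 * the packet held back, and a negative errno when it should not be
 * sent at all.  The three branches in dccp_write_xmit() map onto
 * exactly these cases: transmit, rearm dccps_xmit_timer (or block in
 * dccp_wait_for_ccid()), and free the skb.
 */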
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        return dccp_transmit_skb(sk, (skb_cloned(skb) ?
                                      pskb_copy(skb, GFP_ATOMIC) :
                                      skb_clone(skb, GFP_ATOMIC)));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dreq = dccp_rsk(req);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

        if (dccp_insert_options(sk, skb)) {
                kfree_skb(skb);
                return NULL;
        }

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                       const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;

        if (dccp_insert_options(sk, skb)) {
                kfree_skb(skb);
                return NULL;
        }

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;
        inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
        /*
         * FIXME: what if rebuild_header fails?
         * Should we be doing a rebuild_header here?
         */
        int err = inet_sk_rebuild_header(sk);

        if (err == 0) {
                struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
                                                      code);
                if (skb != NULL) {
                        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                        err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
                        if (err == NET_XMIT_CN)
                                err = 0;
                }
        }

        return err;
}
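/*
 * dccp_make_response() and dccp_make_reset() above follow the same
 * construction pattern as dccp_transmit_skb(): allocate with
 * sock_wmalloc(), skb_reserve() the full sk->sk_prot->max_header,
 * record type and sequence number in the skb control block, let
 * dccp_insert_options() append options, then skb_push() the fixed
 * header and fill it in by hand.  dccp_make_response() cannot reuse
 * dccp_transmit_skb() because it answers on behalf of a request_sock,
 * before a full DCCP socket exists.
 */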
/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        dccp_update_gss(sk, dp->dccps_iss);
        /*
         * SWL and AWL are initially adjusted so that they are not less than
         * the initial Sequence Numbers received and sent, respectively:
         *      SWL := max(GSR + 1 - floor(W/4), ISR),
         *      AWL := max(GSS - W' + 1, ISS).
         * These adjustments MUST be applied only at the beginning of the
         * connection.
         */
        dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));

        icsk->icsk_retransmits = 0;
        init_timer(&dp->dccps_xmit_timer);
        dp->dccps_xmit_timer.data = (unsigned long)sk;
        dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_connect_init(sk);

        skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
        skb->csum = 0;

        dccp_skb_entail(sk, skb);
        dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
                                                GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, sk->sk_prot->max_header);
                skb->csum = 0;
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If delack timer was blocked or is about to expire,
                 * send ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 seq,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

        if (skb == NULL)
                /* FIXME: how to make sure the sync is sent? */
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_seq = seq;

        dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ.  The caller locks the socket for us.  This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                        DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

        if (active) {
                dccp_write_xmit(sk, 1);
                dccp_skb_entail(sk, skb);
                dccp_transmit_skb(sk, skb_clone(skb, prio));
                /* FIXME do we need a retransmit timer here? */
        } else
                dccp_transmit_skb(sk, skb);
}
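/*
 * On the allocation context in dccp_send_close(): an active close runs
 * from process context with the socket locked, so it may sleep
 * (GFP_KERNEL) and first flushes queued data with dccp_write_xmit(sk, 1)
 * so that the CLOSE/CLOSEREQ leaves after any pending payload.  A
 * passive close may be triggered from receive processing, hence
 * GFP_ATOMIC and an immediate, unqueued transmit.
 */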