Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: whitespace fixes

Fix places where there is whitespace before a tab, overly long lines,
awkward `if (...){` brace placement, doubled blank lines, etc. Add a blank line after declarations/initializations.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Stephen Hemminger; committed by David S. Miller.
688d1945 d09d3038

+104 -123
+4 -7
net/ipv4/tcp_bic.c
··· 17 17 #include <linux/module.h> 18 18 #include <net/tcp.h> 19 19 20 - 21 20 #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation 22 21 * max_cwnd = snd_cwnd * beta 23 22 */ ··· 45 46 module_param(smooth_part, int, 0644); 46 47 MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax"); 47 48 48 - 49 49 /* BIC TCP Parameters */ 50 50 struct bictcp { 51 51 u32 cnt; /* increase cwnd by 1 after ACKs */ 52 - u32 last_max_cwnd; /* last maximum snd_cwnd */ 52 + u32 last_max_cwnd; /* last maximum snd_cwnd */ 53 53 u32 loss_cwnd; /* congestion window at last loss */ 54 54 u32 last_cwnd; /* the last snd_cwnd */ 55 55 u32 last_time; /* time when updated last_cwnd */ ··· 101 103 102 104 /* binary increase */ 103 105 if (cwnd < ca->last_max_cwnd) { 104 - __u32 dist = (ca->last_max_cwnd - cwnd) 106 + __u32 dist = (ca->last_max_cwnd - cwnd) 105 107 / BICTCP_B; 106 108 107 109 if (dist > max_increment) ··· 152 154 bictcp_update(ca, tp->snd_cwnd); 153 155 tcp_cong_avoid_ai(tp, ca->cnt); 154 156 } 155 - 156 157 } 157 158 158 159 /* ··· 174 177 175 178 ca->loss_cwnd = tp->snd_cwnd; 176 179 177 - 178 180 if (tp->snd_cwnd <= low_window) 179 181 return max(tp->snd_cwnd >> 1U, 2U); 180 182 else ··· 184 188 { 185 189 const struct tcp_sock *tp = tcp_sk(sk); 186 190 const struct bictcp *ca = inet_csk_ca(sk); 191 + 187 192 return max(tp->snd_cwnd, ca->loss_cwnd); 188 193 } 189 194 ··· 203 206 204 207 if (icsk->icsk_ca_state == TCP_CA_Open) { 205 208 struct bictcp *ca = inet_csk_ca(sk); 209 + 206 210 cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; 207 211 ca->delayed_ack += cnt; 208 212 } 209 213 } 210 - 211 214 212 215 static struct tcp_congestion_ops bictcp __read_mostly = { 213 216 .init = bictcp_init,
+1 -4
net/ipv4/tcp_cong.c
··· 142 142 } 143 143 late_initcall(tcp_congestion_default); 144 144 145 - 146 145 /* Build string with list of available congestion control values */ 147 146 void tcp_get_available_congestion_control(char *buf, size_t maxlen) 148 147 { ··· 153 154 offs += snprintf(buf + offs, maxlen - offs, 154 155 "%s%s", 155 156 offs == 0 ? "" : " ", ca->name); 156 - 157 157 } 158 158 rcu_read_unlock(); 159 159 } ··· 184 186 offs += snprintf(buf + offs, maxlen - offs, 185 187 "%s%s", 186 188 offs == 0 ? "" : " ", ca->name); 187 - 188 189 } 189 190 rcu_read_unlock(); 190 191 } ··· 226 229 227 230 return ret; 228 231 } 229 - 230 232 231 233 /* Change congestion control for socket */ 232 234 int tcp_set_congestion_control(struct sock *sk, const char *name) ··· 333 337 u32 tcp_reno_ssthresh(struct sock *sk) 334 338 { 335 339 const struct tcp_sock *tp = tcp_sk(sk); 340 + 336 341 return max(tp->snd_cwnd >> 1U, 2U); 337 342 } 338 343 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
+10 -8
net/ipv4/tcp_cubic.c
··· 82 82 /* BIC TCP Parameters */ 83 83 struct bictcp { 84 84 u32 cnt; /* increase cwnd by 1 after ACKs */ 85 - u32 last_max_cwnd; /* last maximum snd_cwnd */ 85 + u32 last_max_cwnd; /* last maximum snd_cwnd */ 86 86 u32 loss_cwnd; /* congestion window at last loss */ 87 87 u32 last_cwnd; /* the last snd_cwnd */ 88 88 u32 last_time; /* time when updated last_cwnd */ 89 89 u32 bic_origin_point;/* origin point of bic function */ 90 - u32 bic_K; /* time to origin point from the beginning of the current epoch */ 90 + u32 bic_K; /* time to origin point 91 + from the beginning of the current epoch */ 91 92 u32 delay_min; /* min delay (msec << 3) */ 92 93 u32 epoch_start; /* beginning of an epoch */ 93 94 u32 ack_cnt; /* number of acks */ ··· 220 219 ca->last_time = tcp_time_stamp; 221 220 222 221 if (ca->epoch_start == 0) { 223 - ca->epoch_start = tcp_time_stamp; /* record the beginning of an epoch */ 222 + ca->epoch_start = tcp_time_stamp; /* record beginning */ 224 223 ca->ack_cnt = 1; /* start counting */ 225 224 ca->tcp_cwnd = cwnd; /* syn with cubic */ 226 225 ··· 264 263 265 264 /* c/rtt * (t-K)^3 */ 266 265 delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); 267 - if (t < ca->bic_K) /* below origin*/ 266 + if (t < ca->bic_K) /* below origin*/ 268 267 bic_target = ca->bic_origin_point - delta; 269 - else /* above origin*/ 268 + else /* above origin*/ 270 269 bic_target = ca->bic_origin_point + delta; 271 270 272 271 /* cubic function - calc bictcp_cnt*/ ··· 286 285 /* TCP Friendly */ 287 286 if (tcp_friendliness) { 288 287 u32 scale = beta_scale; 288 + 289 289 delta = (cwnd * scale) >> 3; 290 290 while (ca->ack_cnt > delta) { /* update tcp cwnd */ 291 291 ca->ack_cnt -= delta; 292 292 ca->tcp_cwnd++; 293 293 } 294 294 295 - if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */ 295 + if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */ 296 296 delta = ca->tcp_cwnd - cwnd; 297 297 max_cnt = cwnd / delta; 298 298 if (ca->cnt > max_cnt) ··· 
322 320 bictcp_update(ca, tp->snd_cwnd); 323 321 tcp_cong_avoid_ai(tp, ca->cnt); 324 322 } 325 - 326 323 } 327 324 328 325 static u32 bictcp_recalc_ssthresh(struct sock *sk) ··· 453 452 * based on SRTT of 100ms 454 453 */ 455 454 456 - beta_scale = 8*(BICTCP_BETA_SCALE+beta)/ 3 / (BICTCP_BETA_SCALE - beta); 455 + beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3 456 + / (BICTCP_BETA_SCALE - beta); 457 457 458 458 cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */ 459 459
+2 -3
net/ipv4/tcp_diag.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 12 - 13 12 #include <linux/module.h> 14 13 #include <linux/inet_diag.h> 15 14 ··· 34 35 } 35 36 36 37 static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, 37 - struct inet_diag_req_v2 *r, struct nlattr *bc) 38 + struct inet_diag_req_v2 *r, struct nlattr *bc) 38 39 { 39 40 inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc); 40 41 } 41 42 42 43 static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, 43 - struct inet_diag_req_v2 *req) 44 + struct inet_diag_req_v2 *req) 44 45 { 45 46 return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req); 46 47 }
+72 -73
net/ipv4/tcp_highspeed.c
··· 9 9 #include <linux/module.h> 10 10 #include <net/tcp.h> 11 11 12 - 13 12 /* From AIMD tables from RFC 3649 appendix B, 14 13 * with fixed-point MD scaled <<8. 15 14 */ ··· 16 17 unsigned int cwnd; 17 18 unsigned int md; 18 19 } hstcp_aimd_vals[] = { 19 - { 38, 128, /* 0.50 */ }, 20 - { 118, 112, /* 0.44 */ }, 21 - { 221, 104, /* 0.41 */ }, 22 - { 347, 98, /* 0.38 */ }, 23 - { 495, 93, /* 0.37 */ }, 24 - { 663, 89, /* 0.35 */ }, 25 - { 851, 86, /* 0.34 */ }, 26 - { 1058, 83, /* 0.33 */ }, 27 - { 1284, 81, /* 0.32 */ }, 28 - { 1529, 78, /* 0.31 */ }, 29 - { 1793, 76, /* 0.30 */ }, 30 - { 2076, 74, /* 0.29 */ }, 31 - { 2378, 72, /* 0.28 */ }, 32 - { 2699, 71, /* 0.28 */ }, 33 - { 3039, 69, /* 0.27 */ }, 34 - { 3399, 68, /* 0.27 */ }, 35 - { 3778, 66, /* 0.26 */ }, 36 - { 4177, 65, /* 0.26 */ }, 37 - { 4596, 64, /* 0.25 */ }, 38 - { 5036, 62, /* 0.25 */ }, 39 - { 5497, 61, /* 0.24 */ }, 40 - { 5979, 60, /* 0.24 */ }, 41 - { 6483, 59, /* 0.23 */ }, 42 - { 7009, 58, /* 0.23 */ }, 43 - { 7558, 57, /* 0.22 */ }, 44 - { 8130, 56, /* 0.22 */ }, 45 - { 8726, 55, /* 0.22 */ }, 46 - { 9346, 54, /* 0.21 */ }, 47 - { 9991, 53, /* 0.21 */ }, 48 - { 10661, 52, /* 0.21 */ }, 49 - { 11358, 52, /* 0.20 */ }, 50 - { 12082, 51, /* 0.20 */ }, 51 - { 12834, 50, /* 0.20 */ }, 52 - { 13614, 49, /* 0.19 */ }, 53 - { 14424, 48, /* 0.19 */ }, 54 - { 15265, 48, /* 0.19 */ }, 55 - { 16137, 47, /* 0.19 */ }, 56 - { 17042, 46, /* 0.18 */ }, 57 - { 17981, 45, /* 0.18 */ }, 58 - { 18955, 45, /* 0.18 */ }, 59 - { 19965, 44, /* 0.17 */ }, 60 - { 21013, 43, /* 0.17 */ }, 61 - { 22101, 43, /* 0.17 */ }, 62 - { 23230, 42, /* 0.17 */ }, 63 - { 24402, 41, /* 0.16 */ }, 64 - { 25618, 41, /* 0.16 */ }, 65 - { 26881, 40, /* 0.16 */ }, 66 - { 28193, 39, /* 0.16 */ }, 67 - { 29557, 39, /* 0.15 */ }, 68 - { 30975, 38, /* 0.15 */ }, 69 - { 32450, 38, /* 0.15 */ }, 70 - { 33986, 37, /* 0.15 */ }, 71 - { 35586, 36, /* 0.14 */ }, 72 - { 37253, 36, /* 0.14 */ }, 73 - { 38992, 35, /* 0.14 */ }, 74 - { 40808, 35, 
/* 0.14 */ }, 75 - { 42707, 34, /* 0.13 */ }, 76 - { 44694, 33, /* 0.13 */ }, 77 - { 46776, 33, /* 0.13 */ }, 78 - { 48961, 32, /* 0.13 */ }, 79 - { 51258, 32, /* 0.13 */ }, 80 - { 53677, 31, /* 0.12 */ }, 81 - { 56230, 30, /* 0.12 */ }, 82 - { 58932, 30, /* 0.12 */ }, 83 - { 61799, 29, /* 0.12 */ }, 84 - { 64851, 28, /* 0.11 */ }, 85 - { 68113, 28, /* 0.11 */ }, 86 - { 71617, 27, /* 0.11 */ }, 87 - { 75401, 26, /* 0.10 */ }, 88 - { 79517, 26, /* 0.10 */ }, 89 - { 84035, 25, /* 0.10 */ }, 90 - { 89053, 24, /* 0.10 */ }, 20 + { 38, 128, /* 0.50 */ }, 21 + { 118, 112, /* 0.44 */ }, 22 + { 221, 104, /* 0.41 */ }, 23 + { 347, 98, /* 0.38 */ }, 24 + { 495, 93, /* 0.37 */ }, 25 + { 663, 89, /* 0.35 */ }, 26 + { 851, 86, /* 0.34 */ }, 27 + { 1058, 83, /* 0.33 */ }, 28 + { 1284, 81, /* 0.32 */ }, 29 + { 1529, 78, /* 0.31 */ }, 30 + { 1793, 76, /* 0.30 */ }, 31 + { 2076, 74, /* 0.29 */ }, 32 + { 2378, 72, /* 0.28 */ }, 33 + { 2699, 71, /* 0.28 */ }, 34 + { 3039, 69, /* 0.27 */ }, 35 + { 3399, 68, /* 0.27 */ }, 36 + { 3778, 66, /* 0.26 */ }, 37 + { 4177, 65, /* 0.26 */ }, 38 + { 4596, 64, /* 0.25 */ }, 39 + { 5036, 62, /* 0.25 */ }, 40 + { 5497, 61, /* 0.24 */ }, 41 + { 5979, 60, /* 0.24 */ }, 42 + { 6483, 59, /* 0.23 */ }, 43 + { 7009, 58, /* 0.23 */ }, 44 + { 7558, 57, /* 0.22 */ }, 45 + { 8130, 56, /* 0.22 */ }, 46 + { 8726, 55, /* 0.22 */ }, 47 + { 9346, 54, /* 0.21 */ }, 48 + { 9991, 53, /* 0.21 */ }, 49 + { 10661, 52, /* 0.21 */ }, 50 + { 11358, 52, /* 0.20 */ }, 51 + { 12082, 51, /* 0.20 */ }, 52 + { 12834, 50, /* 0.20 */ }, 53 + { 13614, 49, /* 0.19 */ }, 54 + { 14424, 48, /* 0.19 */ }, 55 + { 15265, 48, /* 0.19 */ }, 56 + { 16137, 47, /* 0.19 */ }, 57 + { 17042, 46, /* 0.18 */ }, 58 + { 17981, 45, /* 0.18 */ }, 59 + { 18955, 45, /* 0.18 */ }, 60 + { 19965, 44, /* 0.17 */ }, 61 + { 21013, 43, /* 0.17 */ }, 62 + { 22101, 43, /* 0.17 */ }, 63 + { 23230, 42, /* 0.17 */ }, 64 + { 24402, 41, /* 0.16 */ }, 65 + { 25618, 41, /* 0.16 */ }, 66 + { 26881, 40, /* 0.16 */ }, 67 
+ { 28193, 39, /* 0.16 */ }, 68 + { 29557, 39, /* 0.15 */ }, 69 + { 30975, 38, /* 0.15 */ }, 70 + { 32450, 38, /* 0.15 */ }, 71 + { 33986, 37, /* 0.15 */ }, 72 + { 35586, 36, /* 0.14 */ }, 73 + { 37253, 36, /* 0.14 */ }, 74 + { 38992, 35, /* 0.14 */ }, 75 + { 40808, 35, /* 0.14 */ }, 76 + { 42707, 34, /* 0.13 */ }, 77 + { 44694, 33, /* 0.13 */ }, 78 + { 46776, 33, /* 0.13 */ }, 79 + { 48961, 32, /* 0.13 */ }, 80 + { 51258, 32, /* 0.13 */ }, 81 + { 53677, 31, /* 0.12 */ }, 82 + { 56230, 30, /* 0.12 */ }, 83 + { 58932, 30, /* 0.12 */ }, 84 + { 61799, 29, /* 0.12 */ }, 85 + { 64851, 28, /* 0.11 */ }, 86 + { 68113, 28, /* 0.11 */ }, 87 + { 71617, 27, /* 0.11 */ }, 88 + { 75401, 26, /* 0.10 */ }, 89 + { 79517, 26, /* 0.10 */ }, 90 + { 84035, 25, /* 0.10 */ }, 91 + { 89053, 24, /* 0.10 */ }, 91 92 }; 92 93 93 94 #define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals)
+4 -2
net/ipv4/tcp_htcp.c
··· 98 98 } 99 99 } 100 100 101 - static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt) 101 + static void measure_achieved_throughput(struct sock *sk, 102 + u32 pkts_acked, s32 rtt) 102 103 { 103 104 const struct inet_connection_sock *icsk = inet_csk(sk); 104 105 const struct tcp_sock *tp = tcp_sk(sk); ··· 149 148 if (use_bandwidth_switch) { 150 149 u32 maxB = ca->maxB; 151 150 u32 old_maxB = ca->old_maxB; 152 - ca->old_maxB = ca->maxB; 153 151 152 + ca->old_maxB = ca->maxB; 154 153 if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) { 155 154 ca->beta = BETA_MIN; 156 155 ca->modeswitch = 0; ··· 271 270 case TCP_CA_Open: 272 271 { 273 272 struct htcp *ca = inet_csk_ca(sk); 273 + 274 274 if (ca->undo_last_cong) { 275 275 ca->last_cong = jiffies; 276 276 ca->undo_last_cong = 0;
-1
net/ipv4/tcp_hybla.c
··· 29 29 module_param(rtt0, int, 0644); 30 30 MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); 31 31 32 - 33 32 /* This is called to refresh values for hybla parameters */ 34 33 static inline void hybla_recalc_param (struct sock *sk) 35 34 {
+1 -2
net/ipv4/tcp_illinois.c
··· 284 284 delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT; 285 285 if (delta >= tp->snd_cwnd) { 286 286 tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd, 287 - (u32) tp->snd_cwnd_clamp); 287 + (u32)tp->snd_cwnd_clamp); 288 288 tp->snd_cwnd_cnt = 0; 289 289 } 290 290 } ··· 298 298 /* Multiplicative decrease */ 299 299 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); 300 300 } 301 - 302 301 303 302 /* Extract info for Tcp socket info provided via netlink. */ 304 303 static void tcp_illinois_info(struct sock *sk, u32 ext,
+2 -3
net/ipv4/tcp_ipv4.c
··· 90 90 int sysctl_tcp_low_latency __read_mostly; 91 91 EXPORT_SYMBOL(sysctl_tcp_low_latency); 92 92 93 - 94 93 #ifdef CONFIG_TCP_MD5SIG 95 94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, 96 95 __be32 daddr, __be32 saddr, const struct tcphdr *th); ··· 1268 1269 .send_ack = tcp_v4_reqsk_send_ack, 1269 1270 .destructor = tcp_v4_reqsk_destructor, 1270 1271 .send_reset = tcp_v4_send_reset, 1271 - .syn_ack_timeout = tcp_syn_ack_timeout, 1272 + .syn_ack_timeout = tcp_syn_ack_timeout, 1272 1273 }; 1273 1274 1274 1275 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { ··· 2182 2183 2183 2184 s = ((struct seq_file *)file->private_data)->private; 2184 2185 s->family = afinfo->family; 2185 - s->last_pos = 0; 2186 + s->last_pos = 0; 2186 2187 return 0; 2187 2188 } 2188 2189 EXPORT_SYMBOL(tcp_seq_open);
+2 -4
net/ipv4/tcp_probe.c
··· 83 83 struct tcp_log *log; 84 84 } tcp_probe; 85 85 86 - 87 86 static inline int tcp_probe_used(void) 88 87 { 89 88 return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); ··· 99 100 si4.sin_port = inet->inet_##mem##port; \ 100 101 si4.sin_addr.s_addr = inet->inet_##mem##addr; \ 101 102 } while (0) \ 102 - 103 103 104 104 /* 105 105 * Hook inserted to be called before each receive packet. ··· 192 194 193 195 return scnprintf(tbuf, n, 194 196 "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n", 195 - (unsigned long) tv.tv_sec, 196 - (unsigned long) tv.tv_nsec, 197 + (unsigned long)tv.tv_sec, 198 + (unsigned long)tv.tv_nsec, 197 199 &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una, 198 200 p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd); 199 201 }
+1 -1
net/ipv4/tcp_scalable.c
··· 31 31 static u32 tcp_scalable_ssthresh(struct sock *sk) 32 32 { 33 33 const struct tcp_sock *tp = tcp_sk(sk); 34 + 34 35 return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); 35 36 } 36 - 37 37 38 38 static struct tcp_congestion_ops tcp_scalable __read_mostly = { 39 39 .ssthresh = tcp_scalable_ssthresh,
-3
net/ipv4/tcp_vegas.c
··· 51 51 module_param(gamma, int, 0644); 52 52 MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); 53 53 54 - 55 54 /* There are several situations when we must "re-start" Vegas: 56 55 * 57 56 * o when a connection is established ··· 132 133 133 134 void tcp_vegas_state(struct sock *sk, u8 ca_state) 134 135 { 135 - 136 136 if (ca_state == TCP_CA_Open) 137 137 vegas_enable(sk); 138 138 else ··· 283 285 /* Use normal slow start */ 284 286 else if (tp->snd_cwnd <= tp->snd_ssthresh) 285 287 tcp_slow_start(tp, acked); 286 - 287 288 } 288 289 289 290 /* Extract info for Tcp socket info provided via netlink. */
-1
net/ipv4/tcp_veno.c
··· 175 175 } else 176 176 tp->snd_cwnd_cnt++; 177 177 } 178 - 179 178 } 180 179 if (tp->snd_cwnd < 2) 181 180 tp->snd_cwnd = 2;
+2 -5
net/ipv4/tcp_westwood.c
··· 42 42 u8 reset_rtt_min; /* Reset RTT min to next RTT sample*/ 43 43 }; 44 44 45 - 46 45 /* TCP Westwood functions and constants */ 47 46 #define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */ 48 47 #define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */ ··· 152 153 w->rtt_min = min(w->rtt, w->rtt_min); 153 154 } 154 155 155 - 156 156 /* 157 157 * @westwood_fast_bw 158 158 * It is called when we are in fast path. In particular it is called when ··· 206 208 return w->cumul_ack; 207 209 } 208 210 209 - 210 211 /* 211 212 * TCP Westwood 212 213 * Here limit is evaluated as Bw estimation*RTTmin (for obtaining it ··· 216 219 { 217 220 const struct tcp_sock *tp = tcp_sk(sk); 218 221 const struct westwood *w = inet_csk_ca(sk); 222 + 219 223 return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2); 220 224 } 221 225 ··· 252 254 } 253 255 } 254 256 255 - 256 257 /* Extract info for Tcp socket info provided via netlink. */ 257 258 static void tcp_westwood_info(struct sock *sk, u32 ext, 258 259 struct sk_buff *skb) 259 260 { 260 261 const struct westwood *ca = inet_csk_ca(sk); 262 + 261 263 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { 262 264 struct tcpvegas_info info = { 263 265 .tcpv_enabled = 1, ··· 268 270 nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); 269 271 } 270 272 } 271 - 272 273 273 274 static struct tcp_congestion_ops tcp_westwood __read_mostly = { 274 275 .init = tcp_westwood_init,
+3 -6
net/ipv4/tcp_yeah.c
··· 54 54 /* Ensure the MD arithmetic works. This is somewhat pedantic, 55 55 * since I don't think we will see a cwnd this large. :) */ 56 56 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); 57 - 58 57 } 59 - 60 58 61 59 static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us) 62 60 { ··· 82 84 /* Scalable */ 83 85 84 86 tp->snd_cwnd_cnt += yeah->pkts_acked; 85 - if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){ 87 + if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) { 86 88 if (tp->snd_cwnd < tp->snd_cwnd_clamp) 87 89 tp->snd_cwnd++; 88 90 tp->snd_cwnd_cnt = 0; ··· 118 120 */ 119 121 120 122 if (after(ack, yeah->vegas.beg_snd_nxt)) { 121 - 122 123 /* We do the Vegas calculations only if we got enough RTT 123 124 * samples that we can be reasonably sure that we got 124 125 * at least one RTT sample that wasn't from a delayed ACK. ··· 186 189 } 187 190 188 191 yeah->lastQ = queue; 189 - 190 192 } 191 193 192 194 /* Save the extent of the current window so we can use this ··· 201 205 } 202 206 } 203 207 204 - static u32 tcp_yeah_ssthresh(struct sock *sk) { 208 + static u32 tcp_yeah_ssthresh(struct sock *sk) 209 + { 205 210 const struct tcp_sock *tp = tcp_sk(sk); 206 211 struct yeah *yeah = inet_csk_ca(sk); 207 212 u32 reduction;