Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (22 commits)
[IPCONFIG]: The kernel gets no IP from some DHCP servers
b43legacy: Fix module init message
rndis_wlan: fix broken data copy
libertas: compare the current command with response
libertas: fix sanity check on sequence number in command response
p54: fix eeprom parser length sanity checks
p54: fix EEPROM structure endianness
ssb: Add pcibios_enable_device() return value check
rc80211-pid: fix rate adjustment
[ESP]: Add select on AUTHENC
[TCP]: Improve ipv4 established hash function.
[NETPOLL]: Revert two bogus cleanups that broke netconsole.
[PPPOL2TP]: Add missing sock_put() in pppol2tp_tunnel_closeall()
[PPPOL2TP]: Add missing sock_put() in pppol2tp_recv_dequeue()
[BLUETOOTH]: l2cap info_timer delete fix in hci_conn_del
[NET]: Fix race in generic address resolution.
iucv: fix build error on !SMP
[TCP]: Must count fack_count also when skipping
[TUN]: Fix RTNL-locking in tun/tap driver
[SCTP]: Use proc_create to setup de->proc_fops.
...

+127 -115
+7 -1
drivers/net/pppol2tp.c
··· 455 455 skb_queue_len(&session->reorder_q)); 456 456 __skb_unlink(skb, &session->reorder_q); 457 457 kfree_skb(skb); 458 + sock_put(session->sock); 458 459 continue; 459 460 } 460 461 ··· 1111 1110 for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { 1112 1111 again: 1113 1112 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { 1113 + struct sk_buff *skb; 1114 + 1114 1115 session = hlist_entry(walk, struct pppol2tp_session, hlist); 1115 1116 1116 1117 sk = session->sock; ··· 1141 1138 /* Purge any queued data */ 1142 1139 skb_queue_purge(&sk->sk_receive_queue); 1143 1140 skb_queue_purge(&sk->sk_write_queue); 1144 - skb_queue_purge(&session->reorder_q); 1141 + while ((skb = skb_dequeue(&session->reorder_q))) { 1142 + kfree_skb(skb); 1143 + sock_put(sk); 1144 + } 1145 1145 1146 1146 release_sock(sk); 1147 1147 sock_put(sk);
+5 -1
drivers/net/tun.c
··· 663 663 case SIOCSIFHWADDR: 664 664 { 665 665 /* try to set the actual net device's hw address */ 666 - int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 666 + int ret; 667 + 668 + rtnl_lock(); 669 + ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 670 + rtnl_unlock(); 667 671 668 672 if (ret == 0) { 669 673 /** Set the character device's hardware address. This is used when
+1 -1
drivers/net/wireless/b43legacy/main.c
··· 3829 3829 #ifdef CONFIG_B43LEGACY_DMA 3830 3830 feat_dma = "D"; 3831 3831 #endif 3832 - printk(KERN_INFO "Broadcom 43xx driver loaded " 3832 + printk(KERN_INFO "Broadcom 43xx-legacy driver loaded " 3833 3833 "[ Features: %s%s%s%s%s, Firmware-ID: " 3834 3834 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n", 3835 3835 feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
+3 -5
drivers/net/wireless/libertas/cmdresp.c
··· 562 562 } 563 563 564 564 resp = (void *)priv->upld_buf; 565 - 566 - curcmd = le16_to_cpu(resp->command); 567 - 565 + curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command); 568 566 respcmd = le16_to_cpu(resp->command); 569 567 result = le16_to_cpu(resp->result); 570 568 ··· 570 572 respcmd, le16_to_cpu(resp->seqnum), priv->upld_len, jiffies); 571 573 lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", (void *) resp, priv->upld_len); 572 574 573 - if (resp->seqnum != resp->seqnum) { 575 + if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { 574 576 lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n", 575 - le16_to_cpu(resp->seqnum), le16_to_cpu(resp->seqnum)); 577 + le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum)); 576 578 spin_unlock_irqrestore(&priv->driver_lock, flags); 577 579 ret = -1; 578 580 goto done;
+12 -8
drivers/net/wireless/p54common.c
··· 166 166 struct p54_common *priv = dev->priv; 167 167 struct eeprom_pda_wrap *wrap = NULL; 168 168 struct pda_entry *entry; 169 - int i = 0; 170 169 unsigned int data_len, entry_len; 171 170 void *tmp; 172 171 int err; 172 + u8 *end = (u8 *)eeprom + len; 173 173 174 174 wrap = (struct eeprom_pda_wrap *) eeprom; 175 - entry = (void *)wrap->data + wrap->len; 176 - i += 2; 177 - i += le16_to_cpu(entry->len)*2; 178 - while (i < len) { 175 + entry = (void *)wrap->data + le16_to_cpu(wrap->len); 176 + 177 + /* verify that at least the entry length/code fits */ 178 + while ((u8 *)entry <= end - sizeof(*entry)) { 179 179 entry_len = le16_to_cpu(entry->len); 180 180 data_len = ((entry_len - 1) << 1); 181 + 182 + /* abort if entry exceeds whole structure */ 183 + if ((u8 *)entry + sizeof(*entry) + data_len > end) 184 + break; 185 + 181 186 switch (le16_to_cpu(entry->code)) { 182 187 case PDR_MAC_ADDRESS: 183 188 SET_IEEE80211_PERM_ADDR(dev, entry->data); ··· 254 249 priv->version = *(u8 *)(entry->data + 1); 255 250 break; 256 251 case PDR_END: 257 - i = len; 252 + /* make it overrun */ 253 + entry_len = len; 258 254 break; 259 255 } 260 256 261 257 entry = (void *)entry + (entry_len + 1)*2; 262 - i += 2; 263 - i += entry_len*2; 264 258 } 265 259 266 260 if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) {
+4 -4
drivers/net/wireless/p54common.h
··· 53 53 } __attribute__ ((packed)); 54 54 55 55 struct eeprom_pda_wrap { 56 - u32 magic; 57 - u16 pad; 58 - u16 len; 59 - u32 arm_opcode; 56 + __le32 magic; 57 + __le16 pad; 58 + __le16 len; 59 + __le32 arm_opcode; 60 60 u8 data[0]; 61 61 } __attribute__ ((packed)); 62 62
+3 -5
drivers/net/wireless/rndis_wlan.c
··· 260 260 __le32 KeyLength; 261 261 u8 Bssid[6]; 262 262 u8 Padding[6]; 263 - __le64 KeyRSC; 263 + u8 KeyRSC[8]; 264 264 u8 KeyMaterial[32]; 265 265 } __attribute__((packed)); 266 266 ··· 1508 1508 struct usbnet *usbdev = dev->priv; 1509 1509 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1510 1510 struct NDIS_802_11_KEY ndis_key; 1511 - int i, keyidx, ret; 1511 + int keyidx, ret; 1512 1512 u8 *addr; 1513 1513 1514 1514 keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX; ··· 1543 1543 ndis_key.KeyIndex = cpu_to_le32(keyidx); 1544 1544 1545 1545 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { 1546 - for (i = 0; i < 6; i++) 1547 - ndis_key.KeyRSC |= 1548 - cpu_to_le64(ext->rx_seq[i] << (i * 8)); 1546 + memcpy(ndis_key.KeyRSC, ext->rx_seq, 6); 1549 1547 ndis_key.KeyIndex |= cpu_to_le32(1 << 29); 1550 1548 } 1551 1549
+4 -1
drivers/ssb/driver_pcicore.c
··· 111 111 112 112 /* Enable PCI bridge bus mastering and memory space */ 113 113 pci_set_master(dev); 114 - pcibios_enable_device(dev, ~0); 114 + if (pcibios_enable_device(dev, ~0) < 0) { 115 + ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n"); 116 + return; 117 + } 115 118 116 119 /* Enable PCI bridge BAR1 prefetch and burst */ 117 120 pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3);
+4 -3
include/linux/netpoll.h
··· 25 25 26 26 struct netpoll_info { 27 27 atomic_t refcnt; 28 + int rx_flags; 28 29 spinlock_t rx_lock; 29 30 struct netpoll *rx_np; /* netpoll that registered an rx_hook */ 30 31 struct sk_buff_head arp_tx; /* list of arp requests to reply to */ ··· 51 50 unsigned long flags; 52 51 int ret = 0; 53 52 54 - if (!npinfo || !npinfo->rx_np) 53 + if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags)) 55 54 return 0; 56 55 57 56 spin_lock_irqsave(&npinfo->rx_lock, flags); 58 - /* check rx_np again with the lock held */ 59 - if (npinfo->rx_np && __netpoll_rx(skb)) 57 + /* check rx_flags again with the lock held */ 58 + if (npinfo->rx_flags && __netpoll_rx(skb)) 60 59 ret = 1; 61 60 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 62 61
+2 -1
include/net/inet_sock.h
··· 175 175 static inline unsigned int inet_ehashfn(const __be32 laddr, const __u16 lport, 176 176 const __be32 faddr, const __be16 fport) 177 177 { 178 - return jhash_2words((__force __u32) laddr ^ (__force __u32) faddr, 178 + return jhash_3words((__force __u32) laddr, 179 + (__force __u32) faddr, 179 180 ((__u32) lport) << 16 | (__force __u32)fport, 180 181 inet_ehash_secret); 181 182 }
+2 -1
net/bluetooth/l2cap.c
··· 417 417 l2cap_sock_kill(sk); 418 418 } 419 419 420 - del_timer_sync(&conn->info_timer); 420 + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) 421 + del_timer_sync(&conn->info_timer); 421 422 422 423 hcon->l2cap_data = NULL; 423 424 kfree(conn);
+1 -1
net/core/neighbour.c
··· 839 839 struct sk_buff *skb = skb_peek(&neigh->arp_queue); 840 840 /* keep skb alive even if arp_queue overflows */ 841 841 if (skb) 842 - skb_get(skb); 842 + skb = skb_copy(skb, GFP_ATOMIC); 843 843 write_unlock(&neigh->lock); 844 844 neigh->ops->solicit(neigh, skb); 845 845 atomic_inc(&neigh->probes);
+8 -4
net/core/netpoll.c
··· 39 39 static atomic_t trapped; 40 40 41 41 #define USEC_PER_POLL 50 42 + #define NETPOLL_RX_ENABLED 1 43 + #define NETPOLL_RX_DROP 2 42 44 43 45 #define MAX_SKB_SIZE \ 44 46 (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ ··· 128 126 if (!test_bit(NAPI_STATE_SCHED, &napi->state)) 129 127 return budget; 130 128 129 + npinfo->rx_flags |= NETPOLL_RX_DROP; 131 130 atomic_inc(&trapped); 132 131 133 132 work = napi->poll(napi, budget); 134 133 135 134 atomic_dec(&trapped); 135 + npinfo->rx_flags &= ~NETPOLL_RX_DROP; 136 136 137 137 return budget - work; 138 138 } ··· 476 472 if (skb->dev->type != ARPHRD_ETHER) 477 473 goto out; 478 474 479 - /* if receive ARP during middle of NAPI poll, then queue */ 475 + /* check if netpoll clients need ARP */ 480 476 if (skb->protocol == htons(ETH_P_ARP) && 481 477 atomic_read(&trapped)) { 482 478 skb_queue_tail(&npi->arp_tx, skb); ··· 538 534 return 1; 539 535 540 536 out: 541 - /* If packet received while already in poll then just 542 - * silently drop. 543 - */ 544 537 if (atomic_read(&trapped)) { 545 538 kfree_skb(skb); 546 539 return 1; ··· 676 675 goto release; 677 676 } 678 677 678 + npinfo->rx_flags = 0; 679 679 npinfo->rx_np = NULL; 680 680 681 681 spin_lock_init(&npinfo->rx_lock); ··· 758 756 759 757 if (np->rx_hook) { 760 758 spin_lock_irqsave(&npinfo->rx_lock, flags); 759 + npinfo->rx_flags |= NETPOLL_RX_ENABLED; 761 760 npinfo->rx_np = np; 762 761 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 763 762 } ··· 800 797 if (npinfo->rx_np == np) { 801 798 spin_lock_irqsave(&npinfo->rx_lock, flags); 802 799 npinfo->rx_np = NULL; 800 + npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; 803 801 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 804 802 } 805 803
+1 -1
net/ipv4/Kconfig
··· 343 343 tristate "IP: ESP transformation" 344 344 select XFRM 345 345 select CRYPTO 346 - select CRYPTO_AEAD 346 + select CRYPTO_AUTHENC 347 347 select CRYPTO_HMAC 348 348 select CRYPTO_MD5 349 349 select CRYPTO_CBC
+2 -2
net/ipv4/ipconfig.c
··· 753 753 printk("Unknown ARP type 0x%04x for device %s\n", dev->type, dev->name); 754 754 b->htype = dev->type; /* can cause undefined behavior */ 755 755 } 756 + 757 + /* server_ip and your_ip address are both already zero per RFC2131 */ 756 758 b->hlen = dev->addr_len; 757 - b->your_ip = NONE; 758 - b->server_ip = NONE; 759 759 memcpy(b->hw_addr, dev->dev_addr, dev->addr_len); 760 760 b->secs = htons(jiffies_diff / HZ); 761 761 b->xid = d->xid;
+3 -2
net/ipv4/tcp_bic.c
··· 1 1 /* 2 2 * Binary Increase Congestion control for TCP 3 - * 3 + * Home page: 4 + * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC 4 5 * This is from the implementation of BICTCP in 5 6 * Lison-Xu, Kahaled Harfoush, and Injong Rhee. 6 7 * "Binary Increase Congestion Control for Fast, Long Distance 7 8 * Networks" in InfoComm 2004 8 9 * Available from: 9 - * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf 10 + * http://netsrv.csc.ncsu.edu/export/bitcp.pdf 10 11 * 11 12 * Unless BIC is enabled and congestion window is large 12 13 * this behaves the same as the original Reno.
+9 -5
net/ipv4/tcp_input.c
··· 1367 1367 * a normal way 1368 1368 */ 1369 1369 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1370 - u32 skip_to_seq) 1370 + u32 skip_to_seq, int *fack_count) 1371 1371 { 1372 1372 tcp_for_write_queue_from(skb, sk) { 1373 1373 if (skb == tcp_send_head(sk)) ··· 1375 1375 1376 1376 if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) 1377 1377 break; 1378 + 1379 + *fack_count += tcp_skb_pcount(skb); 1378 1380 } 1379 1381 return skb; 1380 1382 } ··· 1392 1390 return skb; 1393 1391 1394 1392 if (before(next_dup->start_seq, skip_to_seq)) { 1395 - skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq); 1393 + skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count); 1396 1394 tcp_sacktag_walk(skb, sk, NULL, 1397 1395 next_dup->start_seq, next_dup->end_seq, 1398 1396 1, fack_count, reord, flag); ··· 1539 1537 1540 1538 /* Head todo? */ 1541 1539 if (before(start_seq, cache->start_seq)) { 1542 - skb = tcp_sacktag_skip(skb, sk, start_seq); 1540 + skb = tcp_sacktag_skip(skb, sk, start_seq, 1541 + &fack_count); 1543 1542 skb = tcp_sacktag_walk(skb, sk, next_dup, 1544 1543 start_seq, 1545 1544 cache->start_seq, ··· 1568 1565 goto walk; 1569 1566 } 1570 1567 1571 - skb = tcp_sacktag_skip(skb, sk, cache->end_seq); 1568 + skb = tcp_sacktag_skip(skb, sk, cache->end_seq, 1569 + &fack_count); 1572 1570 /* Check overlap against next cached too (past this one already) */ 1573 1571 cache++; 1574 1572 continue; ··· 1581 1577 break; 1582 1578 fack_count = tp->fackets_out; 1583 1579 } 1584 - skb = tcp_sacktag_skip(skb, sk, start_seq); 1580 + skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count); 1585 1581 1586 1582 walk: 1587 1583 skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
+1 -1
net/ipv6/Kconfig
··· 85 85 depends on IPV6 86 86 select XFRM 87 87 select CRYPTO 88 - select CRYPTO_AEAD 88 + select CRYPTO_AUTHENC 89 89 select CRYPTO_HMAC 90 90 select CRYPTO_MD5 91 91 select CRYPTO_CBC
+5 -3
net/irda/ircomm/ircomm_core.c
··· 76 76 77 77 #ifdef CONFIG_PROC_FS 78 78 { struct proc_dir_entry *ent; 79 - ent = create_proc_entry("ircomm", 0, proc_irda); 80 - if (ent) 81 - ent->proc_fops = &ircomm_proc_fops; 79 + ent = proc_create("ircomm", 0, proc_irda, &ircomm_proc_fops); 80 + if (!ent) { 81 + printk(KERN_ERR "ircomm_init: can't create /proc entry!\n"); 82 + return -ENODEV; 83 + } 82 84 } 83 85 #endif /* CONFIG_PROC_FS */ 84 86
+1 -3
net/irda/irlan/irlan_common.c
··· 128 128 129 129 #ifdef CONFIG_PROC_FS 130 130 { struct proc_dir_entry *proc; 131 - proc = create_proc_entry("irlan", 0, proc_irda); 131 + proc = proc_create("irlan", 0, proc_irda, &irlan_fops); 132 132 if (!proc) { 133 133 printk(KERN_ERR "irlan_init: can't create /proc entry!\n"); 134 134 return -ENODEV; 135 135 } 136 - 137 - proc->proc_fops = &irlan_fops; 138 136 } 139 137 #endif /* CONFIG_PROC_FS */ 140 138
+3 -5
net/irda/irproc.c
··· 72 72 return; 73 73 proc_irda->owner = THIS_MODULE; 74 74 75 - for (i=0; i<ARRAY_SIZE(irda_dirs); i++) { 76 - d = create_proc_entry(irda_dirs[i].name, 0, proc_irda); 77 - if (d) 78 - d->proc_fops = irda_dirs[i].fops; 79 - } 75 + for (i = 0; i < ARRAY_SIZE(irda_dirs); i++) 76 + d = proc_create(irda_dirs[i].name, 0, proc_irda, 77 + irda_dirs[i].fops); 80 78 } 81 79 82 80 /*
-2
net/iucv/iucv.c
··· 621 621 return iucv_call_b2f0(IUCV_SEVER, parm); 622 622 } 623 623 624 - #ifdef CONFIG_SMP 625 624 /** 626 625 * __iucv_cleanup_queue 627 626 * @dummy: unused dummy argument ··· 631 632 static void __iucv_cleanup_queue(void *dummy) 632 633 { 633 634 } 634 - #endif 635 635 636 636 /** 637 637 * iucv_cleanup_queue
+43 -49
net/mac80211/rc80211_pid_algo.c
··· 2 2 * Copyright 2002-2005, Instant802 Networks, Inc. 3 3 * Copyright 2005, Devicescape Software, Inc. 4 4 * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de> 5 - * Copyright 2007, Stefano Brivio <stefano.brivio@polimi.it> 5 + * Copyright 2007-2008, Stefano Brivio <stefano.brivio@polimi.it> 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify 8 8 * it under the terms of the GNU General Public License version 2 as ··· 63 63 * RC_PID_ARITH_SHIFT. 64 64 */ 65 65 66 - 67 - /* Shift the adjustment so that we won't switch to a lower rate if it exhibited 68 - * a worse failed frames behaviour and we'll choose the highest rate whose 69 - * failed frames behaviour is not worse than the one of the original rate 70 - * target. While at it, check that the adjustment is within the ranges. Then, 71 - * provide the new rate index. */ 72 - static int rate_control_pid_shift_adjust(struct rc_pid_rateinfo *r, 73 - int adj, int cur, int l) 74 - { 75 - int i, j, k, tmp; 76 - 77 - j = r[cur].rev_index; 78 - i = j + adj; 79 - 80 - if (i < 0) 81 - return r[0].index; 82 - if (i >= l - 1) 83 - return r[l - 1].index; 84 - 85 - tmp = i; 86 - 87 - if (adj < 0) { 88 - for (k = j; k >= i; k--) 89 - if (r[k].diff <= r[j].diff) 90 - tmp = k; 91 - } else { 92 - for (k = i + 1; k + i < l; k++) 93 - if (r[k].diff <= r[i].diff) 94 - tmp = k; 95 - } 96 - 97 - return r[tmp].index; 98 - } 99 - 66 + /* Adjust the rate while ensuring that we won't switch to a lower rate if it 67 + * exhibited a worse failed frames behaviour and we'll choose the highest rate 68 + * whose failed frames behaviour is not worse than the one of the original rate 69 + * target. While at it, check that the new rate is valid. */ 100 70 static void rate_control_pid_adjust_rate(struct ieee80211_local *local, 101 71 struct sta_info *sta, int adj, 102 72 struct rc_pid_rateinfo *rinfo) 103 73 { 104 74 struct ieee80211_sub_if_data *sdata; 105 75 struct ieee80211_hw_mode *mode; 106 - int newidx; 107 - int maxrate; 108 - int back = (adj > 0) ? 1 : -1; 76 + int cur_sorted, new_sorted, probe, tmp, n_bitrates; 77 + int cur = sta->txrate; 109 78 110 79 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 111 80 112 81 mode = local->oper_hw_mode; 113 - maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1; 82 + n_bitrates = mode->num_rates; 114 83 115 - newidx = rate_control_pid_shift_adjust(rinfo, adj, sta->txrate, 116 - mode->num_rates); 84 + /* Map passed arguments to sorted values. */ 85 + cur_sorted = rinfo[cur].rev_index; 86 + new_sorted = cur_sorted + adj; 117 87 118 - while (newidx != sta->txrate) { 119 - if (rate_supported(sta, mode, newidx) && 120 - (maxrate < 0 || newidx <= maxrate)) { 121 - sta->txrate = newidx; 88 + /* Check limits. */ 89 + if (new_sorted < 0) 90 + new_sorted = rinfo[0].rev_index; 91 + else if (new_sorted >= n_bitrates) 92 + new_sorted = rinfo[n_bitrates - 1].rev_index; 93 + 94 + tmp = new_sorted; 95 + 96 + if (adj < 0) { 97 + /* Ensure that the rate decrease isn't disadvantageous. */ 98 + for (probe = cur_sorted; probe >= new_sorted; probe--) 99 + if (rinfo[probe].diff <= rinfo[cur_sorted].diff && 100 + rate_supported(sta, mode, rinfo[probe].index)) 101 + tmp = probe; 102 + } else { 103 + /* Look for rate increase with zero (or below) cost. */ 104 + for (probe = new_sorted + 1; probe < n_bitrates; probe++) 105 + if (rinfo[probe].diff <= rinfo[new_sorted].diff && 106 + rate_supported(sta, mode, rinfo[probe].index)) 107 + tmp = probe; 108 + } 109 + 110 + /* Fit the rate found to the nearest supported rate. */ 111 + do { 112 + if (rate_supported(sta, mode, rinfo[tmp].index)) { 113 + sta->txrate = rinfo[tmp].index; 122 114 break; 123 115 } 124 - 125 - newidx += back; 126 - } 116 + if (adj < 0) 117 + tmp--; 118 + else 119 + tmp++; 120 + } while (tmp < n_bitrates && tmp >= 0); 127 121 128 122 #ifdef CONFIG_MAC80211_DEBUGFS 129 123 rate_control_pid_event_rate_change( 130 124 &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, 131 - newidx, mode->rates[newidx].rate); 125 + cur, mode->rates[cur].rate); 132 126 #endif 133 127 }
+3 -6
net/sctp/proc.c
··· 256 256 { 257 257 struct proc_dir_entry *p; 258 258 259 - p = create_proc_entry("eps", S_IRUGO, proc_net_sctp); 259 + p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops); 260 260 if (!p) 261 261 return -ENOMEM; 262 - 263 - p->proc_fops = &sctp_eps_seq_fops; 264 262 265 263 return 0; 266 264 } ··· 365 367 { 366 368 struct proc_dir_entry *p; 367 369 368 - p = create_proc_entry("assocs", S_IRUGO, proc_net_sctp); 370 + p = proc_create("assocs", S_IRUGO, proc_net_sctp, 371 + &sctp_assocs_seq_fops); 369 372 if (!p) 370 373 return -ENOMEM; 371 - 372 - p->proc_fops = &sctp_assocs_seq_fops; 373 374 374 375 return 0; 375 376 }