Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
Phonet: keep TX queue disabled when the device is off
SCHED: netem: Correct documentation comment in code.
netfilter: update rwlock initialization for nat_table
netlabel: Compiler warning and NULL pointer dereference fix
e1000e: fix double release of mutex
IA64: HP_SIMETH needs to depend upon NET
netpoll: fix race on poll_list resulting in garbage entry
ipv6: silence log messages for locally generated multicast
sungem: improve ethtool output with internal pcs and serdes
tcp: tcp_vegas cong avoid fix
sungem: Make PCS PHY support partially work again.

+164 -156
+1
arch/ia64/hp/sim/Kconfig
···
 config HP_SIMETH
 	bool "Simulated Ethernet "
+	depends on NET
 
 config HP_SIMSERIAL
 	bool "Simulated serial driver support"
+7 -2
drivers/net/e1000e/ich8lan.c
···
 		ctrl |= E1000_CTRL_PHY_RST;
 	}
 	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	/* Whether or not the swflag was acquired, we need to reset the part */
 	hw_dbg(hw, "Issuing a global reset to ich8lan");
 	ew32(CTRL, (ctrl | E1000_CTRL_RST));
 	msleep(20);
 
-	/* release the swflag because it is not reset by hardware reset */
-	e1000_release_swflag_ich8lan(hw);
+	if (!ret_val) {
+		/* release the swflag because it is not reset by
+		 * hardware reset
+		 */
+		e1000_release_swflag_ich8lan(hw);
+	}
 
 	ret_val = e1000e_get_auto_rd_done(hw);
 	if (ret_val) {
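
The double release: e1000_acquire_swflag_ich8lan() can fail (e.g. on timeout), but the old code released the software flag unconditionally after the reset, so a failed acquire was still followed by a release the caller never owned. A minimal userspace sketch of the corrected acquire/conditional-release pattern, with pthread_mutex_trylock() standing in for the hardware swflag; names here are illustrative, not the driver's:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t swflag = PTHREAD_MUTEX_INITIALIZER;

	static void global_reset(void)
	{
		/* may fail, like e1000_acquire_swflag_ich8lan() */
		int ret = pthread_mutex_trylock(&swflag);

		/* the reset itself is issued whether or not we hold the flag */
		puts("issuing global reset");

		if (ret == 0)
			pthread_mutex_unlock(&swflag); /* release only what we took */
	}

	int main(void)
	{
		global_reset();
		return 0;
	}
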
+88 -56
drivers/net/sungem.c
···
 	return NETDEV_TX_OK;
 }
 
+static void gem_pcs_reset(struct gem *gp)
+{
+	int limit;
+	u32 val;
+
+	/* Reset PCS unit. */
+	val = readl(gp->regs + PCS_MIICTRL);
+	val |= PCS_MIICTRL_RST;
+	writel(val, gp->regs + PCS_MIICTRL);
+
+	limit = 32;
+	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+		udelay(100);
+		if (limit-- <= 0)
+			break;
+	}
+	if (limit <= 0)
+		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
+		       gp->dev->name);
+}
+
+static void gem_pcs_reinit_adv(struct gem *gp)
+{
+	u32 val;
+
+	/* Make sure PCS is disabled while changing advertisement
+	 * configuration.
+	 */
+	val = readl(gp->regs + PCS_CFG);
+	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+	writel(val, gp->regs + PCS_CFG);
+
+	/* Advertise all capabilities except assymetric
+	 * pause.
+	 */
+	val = readl(gp->regs + PCS_MIIADV);
+	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+		PCS_MIIADV_SP | PCS_MIIADV_AP);
+	writel(val, gp->regs + PCS_MIIADV);
+
+	/* Enable and restart auto-negotiation, disable wrapback/loopback,
+	 * and re-enable PCS.
+	 */
+	val = readl(gp->regs + PCS_MIICTRL);
+	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+	val &= ~PCS_MIICTRL_WB;
+	writel(val, gp->regs + PCS_MIICTRL);
+
+	val = readl(gp->regs + PCS_CFG);
+	val |= PCS_CFG_ENABLE;
+	writel(val, gp->regs + PCS_CFG);
+
+	/* Make sure serialink loopback is off. The meaning
+	 * of this bit is logically inverted based upon whether
+	 * you are in Serialink or SERDES mode.
+	 */
+	val = readl(gp->regs + PCS_SCTRL);
+	if (gp->phy_type == phy_serialink)
+		val &= ~PCS_SCTRL_LOOP;
+	else
+		val |= PCS_SCTRL_LOOP;
+	writel(val, gp->regs + PCS_SCTRL);
+}
+
 #define STOP_TRIES 32
 
 /* Must be invoked under gp->lock and gp->tx_lock. */
···
 	if (limit <= 0)
 		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+
+	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
+		gem_pcs_reinit_adv(gp);
 }
 
 /* Must be invoked under gp->lock and gp->tx_lock. */
···
 	    gp->phy_type == phy_serdes) {
 		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
 
-		if (pcs_lpa & PCS_MIIADV_FD)
+		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
 			full_duplex = 1;
 		speed = SPEED_1000;
 	}
···
 	val = readl(gp->regs + PCS_MIISTAT);
 
 	if ((val & PCS_MIISTAT_LS) != 0) {
+		if (gp->lstate == link_up)
+			goto restart;
+
 		gp->lstate = link_up;
 		netif_carrier_on(gp->dev);
 		(void)gem_set_link_modes(gp);
···
 		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
 			gp->phy_mii.def->ops->init(&gp->phy_mii);
 	} else {
-		u32 val;
-		int limit;
-
-		/* Reset PCS unit. */
-		val = readl(gp->regs + PCS_MIICTRL);
-		val |= PCS_MIICTRL_RST;
-		writel(val, gp->regs + PCS_MIICTRL);
-
-		limit = 32;
-		while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
-			udelay(100);
-			if (limit-- <= 0)
-				break;
-		}
-		if (limit <= 0)
-			printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
-			       gp->dev->name);
-
-		/* Make sure PCS is disabled while changing advertisement
-		 * configuration.
-		 */
-		val = readl(gp->regs + PCS_CFG);
-		val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
-		writel(val, gp->regs + PCS_CFG);
-
-		/* Advertise all capabilities except assymetric
-		 * pause.
-		 */
-		val = readl(gp->regs + PCS_MIIADV);
-		val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
-			PCS_MIIADV_SP | PCS_MIIADV_AP);
-		writel(val, gp->regs + PCS_MIIADV);
-
-		/* Enable and restart auto-negotiation, disable wrapback/loopback,
-		 * and re-enable PCS.
-		 */
-		val = readl(gp->regs + PCS_MIICTRL);
-		val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
-		val &= ~PCS_MIICTRL_WB;
-		writel(val, gp->regs + PCS_MIICTRL);
-
-		val = readl(gp->regs + PCS_CFG);
-		val |= PCS_CFG_ENABLE;
-		writel(val, gp->regs + PCS_CFG);
-
-		/* Make sure serialink loopback is off. The meaning
-		 * of this bit is logically inverted based upon whether
-		 * you are in Serialink or SERDES mode.
-		 */
-		val = readl(gp->regs + PCS_SCTRL);
-		if (gp->phy_type == phy_serialink)
-			val &= ~PCS_SCTRL_LOOP;
-		else
-			val |= PCS_SCTRL_LOOP;
-		writel(val, gp->regs + PCS_SCTRL);
+		gem_pcs_reset(gp);
+		gem_pcs_reinit_adv(gp);
 	}
 
 	/* Default aneg parameters */
···
 		cmd->speed = 0;
 		cmd->duplex = cmd->port = cmd->phy_address =
 			cmd->transceiver = cmd->autoneg = 0;
+
+		/* serdes means usually a Fibre connector, with most fixed */
+		if (gp->phy_type == phy_serdes) {
+			cmd->port = PORT_FIBRE;
+			cmd->supported = (SUPPORTED_1000baseT_Half |
+				SUPPORTED_1000baseT_Full |
+				SUPPORTED_FIBRE | SUPPORTED_Autoneg |
+				SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+			cmd->advertising = cmd->supported;
+			cmd->transceiver = XCVR_INTERNAL;
+			if (gp->lstate == link_up)
+				cmd->speed = SPEED_1000;
+			cmd->duplex = DUPLEX_FULL;
+			cmd->autoneg = 1;
+		}
 	}
 	cmd->maxtxpkt = cmd->maxrxpkt = 0;
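
One subtle piece of this fix: a SERDES link partner may not set the full-duplex bit in the PCS link-partner register, but a gigabit SERDES link runs full duplex regardless, hence the added "|| gp->phy_type == phy_serdes". A toy sketch of that decision; the PCS_MIIADV_FD value here is illustrative, not copied from sungem.h:

	#include <stdbool.h>
	#include <stdio.h>

	#define PCS_MIIADV_FD 0x00000020 /* illustrative bit value */

	enum phy_type { phy_serialink, phy_serdes };

	/* SERDES links are full duplex even without the LPA bit */
	static bool link_full_duplex(unsigned pcs_lpa, enum phy_type t)
	{
		return (pcs_lpa & PCS_MIIADV_FD) || t == phy_serdes;
	}

	int main(void)
	{
		printf("serdes, no FD bit -> %d\n",
		       link_full_duplex(0, phy_serdes));    /* 1 */
		printf("serialink, no FD bit -> %d\n",
		       link_full_duplex(0, phy_serialink)); /* 0 */
		return 0;
	}
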
+7
include/linux/netdevice.h
···
 {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
 	NAPI_STATE_DISABLE,	/* Disable pending */
+	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 };
 
 extern void __napi_schedule(struct napi_struct *n);
···
 {
 	unsigned long flags;
 
+	/*
+	 * don't let napi dequeue from the cpu poll list
+	 * just in case its running on a different cpu
+	 */
+	if (unlikely(test_bit(NAPI_STATE_NPSVC, &napi->state)))
+		return;
 	local_irq_save(flags);
 	__netif_rx_complete(dev, napi);
 	local_irq_restore(flags);
+2
net/core/netpoll.c
···
 	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
+	set_bit(NAPI_STATE_NPSVC, &napi->state);
 
 	work = napi->poll(napi, budget);
 
+	clear_bit(NAPI_STATE_NPSVC, &napi->state);
 	atomic_dec(&trapped);
 	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
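
The race being fixed: netpoll drives a device's NAPI handler directly, possibly on a different CPU than the one whose softirq owns napi->poll_list. If the handler calls netif_rx_complete(), the napi struct gets unlinked from a list the current CPU does not own, leaving a garbage entry behind. The new NPSVC bit makes completion a no-op for the duration of the netpoll service. A compressed userspace model of the guard, with C11 atomics standing in for the kernel's set_bit/test_bit and the list manipulation elided:

	#include <stdatomic.h>
	#include <stdio.h>

	enum { NAPI_STATE_SCHED, NAPI_STATE_NPSVC };

	struct napi { atomic_ulong state; };

	static void netif_rx_complete(struct napi *n)
	{
		/* bail out instead of unlinking a napi that netpoll owns */
		if (atomic_load(&n->state) & (1UL << NAPI_STATE_NPSVC))
			return;
		/* ...would unlink n from this cpu's poll_list here... */
	}

	static void netpoll_poll_napi(struct napi *n)
	{
		atomic_fetch_or(&n->state, 1UL << NAPI_STATE_NPSVC);
		netif_rx_complete(n);	/* the driver's poll may call this */
		atomic_fetch_and(&n->state, ~(1UL << NAPI_STATE_NPSVC));
	}

	int main(void)
	{
		struct napi n = { 0 };
		netpoll_poll_napi(&n);
		puts("poll serviced without touching the poll_list");
		return 0;
	}
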
+1 -1
net/ipv4/netfilter/nf_nat_rule.c
···
 static struct xt_table nat_table = {
 	.name		= "nat",
 	.valid_hooks	= NAT_VALID_HOOKS,
-	.lock		= __RW_LOCK_UNLOCKED(__nat_table.lock),
+	.lock		= __RW_LOCK_UNLOCKED(nat_table.lock),
 	.me		= THIS_MODULE,
 	.af		= AF_INET,
 };
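
The stale "__nat_table" is apparently a leftover from an earlier name of the variable. It compiled anyway because __RW_LOCK_UNLOCKED() only stringifies its argument into a lock-debugging (lockdep) name, so the mismatch cost nothing at build time and merely mislabeled the lock. A toy model of that mechanism; FAKE_RW_LOCK_UNLOCKED is invented purely for illustration:

	#include <stdio.h>

	/* like __RW_LOCK_UNLOCKED(), the argument is only stringified
	 * into a debug name, so a stale identifier still compiles --
	 * lock debugging just reports the wrong name. */
	struct fake_rwlock { const char *dep_map_name; };
	#define FAKE_RW_LOCK_UNLOCKED(lockname) { .dep_map_name = #lockname }

	static struct fake_rwlock lock = FAKE_RW_LOCK_UNLOCKED(nat_table.lock);

	int main(void)
	{
		printf("lock registered as \"%s\"\n", lock.dep_map_name);
		return 0;
	}
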
+10 -70
net/ipv4/tcp_vegas.c
···
 
 #include "tcp_vegas.h"
 
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-static int alpha = 2<<V_PARAM_SHIFT;
-static int beta = 4<<V_PARAM_SHIFT;
-static int gamma = 1<<V_PARAM_SHIFT;
+static int alpha = 2;
+static int beta = 4;
+static int gamma = 1;
 
 module_param(alpha, int, 0644);
-MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(alpha, "lower bound of packets in network");
 module_param(beta, int, 0644);
-MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(beta, "upper bound of packets in network");
 module_param(gamma, int, 0644);
 MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
···
 		return;
 	}
 
-	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
-	 *
-	 * These are so named because they represent the approximate values
-	 * of snd_una and snd_nxt at the beginning of the current RTT. More
-	 * precisely, they represent the amount of data sent during the RTT.
-	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
-	 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
-	 * bytes of data have been ACKed during the course of the RTT, giving
-	 * an "actual" rate of:
-	 *
-	 *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
-	 *
-	 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
-	 * because delayed ACKs can cover more than one segment, so they
-	 * don't line up nicely with the boundaries of RTTs.
-	 *
-	 * Another unfortunate fact of life is that delayed ACKs delay the
-	 * advance of the left edge of our send window, so that the number
-	 * of bytes we send in an RTT is often less than our cwnd will allow.
-	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
-	 */
-
 	if (after(ack, vegas->beg_snd_nxt)) {
 		/* Do the Vegas once-per-RTT cwnd adjustment. */
-		u32 old_wnd, old_snd_cwnd;
-
-
-		/* Here old_wnd is essentially the window of data that was
-		 * sent during the previous RTT, and has all
-		 * been acknowledged in the course of the RTT that ended
-		 * with the ACK we just received. Likewise, old_snd_cwnd
-		 * is the cwnd during the previous RTT.
-		 */
-		old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
-			tp->mss_cache;
-		old_snd_cwnd = vegas->beg_snd_cwnd;
 
 		/* Save the extent of the current window so we can use this
 		 * at the end of the next RTT.
 		 */
-		vegas->beg_snd_una = vegas->beg_snd_nxt;
 		vegas->beg_snd_nxt = tp->snd_nxt;
-		vegas->beg_snd_cwnd = tp->snd_cwnd;
 
 		/* We do the Vegas calculations only if we got enough RTT
 		 * samples that we can be reasonably sure that we got
···
 			 *
 			 * This is:
 			 *     (actual rate in segments) * baseRTT
-			 * We keep it as a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary point.
 			 */
-			target_cwnd = ((u64)old_wnd * vegas->baseRTT);
-			target_cwnd <<= V_PARAM_SHIFT;
-			do_div(target_cwnd, rtt);
+			target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
 
 			/* Calculate the difference between the window we had,
 			 * and the window we would like to have. This quantity
 			 * is the "Diff" from the Arizona Vegas papers.
-			 *
-			 * Again, this is a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary
-			 * point.
 			 */
-			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
+			diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
 			if (diff > gamma && tp->snd_ssthresh > 2 ) {
 				/* Going too fast. Time to slow down
···
 				 * truncation robs us of full link
 				 * utilization.
 				 */
-				tp->snd_cwnd = min(tp->snd_cwnd,
-						   ((u32)target_cwnd >>
-						    V_PARAM_SHIFT)+1);
+				tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
 
 			} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
 				/* Slow start. */
 				tcp_slow_start(tp);
 			} else {
 				/* Congestion avoidance. */
-				u32 next_snd_cwnd;
 
 				/* Figure out where we would like cwnd
 				 * to be.
···
 					/* The old window was too fast, so
 					 * we slow down.
 					 */
-					next_snd_cwnd = old_snd_cwnd - 1;
+					tp->snd_cwnd--;
 				} else if (diff < alpha) {
 					/* We don't have enough extra packets
 					 * in the network, so speed up.
 					 */
-					next_snd_cwnd = old_snd_cwnd + 1;
+					tp->snd_cwnd++;
 				} else {
 					/* Sending just as fast as we
 					 * should be.
 					 */
 				}
-
-				/* Adjust cwnd upward or downward, toward the
-				 * desired value.
-				 */
-				if (next_snd_cwnd > tp->snd_cwnd)
-					tp->snd_cwnd++;
-				else if (next_snd_cwnd < tp->snd_cwnd)
-					tp->snd_cwnd--;
 			}
 
 			if (tp->snd_cwnd < 2)
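
The fix drops the V_PARAM_SHIFT fixed-point scaling and the stale per-RTT old_wnd/old_snd_cwnd snapshots, computing the Vegas quantities directly from the current window: target_cwnd = cwnd * baseRTT / rtt and Diff = cwnd * (rtt - baseRTT) / baseRTT, both in whole segments. A worked example with made-up numbers; the units are arbitrary, only the ratios matter:

	#include <stdio.h>

	int main(void)
	{
		unsigned snd_cwnd = 20;  /* assumed current window, segments */
		unsigned baseRTT  = 100; /* assumed minimum RTT observed */
		unsigned rtt      = 125; /* assumed latest RTT sample */

		unsigned target_cwnd = snd_cwnd * baseRTT / rtt;           /* 16 */
		unsigned diff = snd_cwnd * (rtt - baseRTT) / baseRTT;      /* 5 */

		printf("target_cwnd=%u diff=%u\n", target_cwnd, diff);
		/* With the defaults alpha=2, beta=4: diff=5 > beta, so the
		 * congestion-avoidance branch would decrement snd_cwnd. */
		return 0;
	}
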
+6 -1
net/ipv6/ndisc.c
···
 		   is invalid, but ndisc specs say nothing
 		   about it. It could be misconfiguration, or
 		   an smart proxy agent tries to help us :-)
+
+		   We should not print the error if NA has been
+		   received from loopback - it is just our own
+		   unsolicited advertisement.
 		 */
-		ND_PRINTK1(KERN_WARNING
+		if (skb->pkt_type != PACKET_LOOPBACK)
+			ND_PRINTK1(KERN_WARNING
 			   "ICMPv6 NA: someone advertises our address on %s!\n",
 			   ifp->idev->dev->name);
 		in6_ifa_put(ifp);
+20 -18
net/netlabel/netlabel_unlabeled.c
···
 				  const struct in_addr *mask,
 				  struct netlbl_audit *audit_info)
 {
-	int ret_val = 0;
 	struct netlbl_af4list *list_entry;
 	struct netlbl_unlhsh_addr4 *entry;
 	struct audit_buffer *audit_buf;
···
 	if (list_entry != NULL)
 		entry = netlbl_unlhsh_addr4_entry(list_entry);
 	else
-		ret_val = -ENOENT;
+		entry = NULL;
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
···
 					  addr->s_addr, mask->s_addr);
 		if (dev != NULL)
 			dev_put(dev);
-		if (entry && security_secid_to_secctx(entry->secid,
-				&secctx,
-				&secctx_len) == 0) {
+		if (entry != NULL &&
+		    security_secid_to_secctx(entry->secid,
+					     &secctx, &secctx_len) == 0) {
 			audit_log_format(audit_buf, " sec_obj=%s", secctx);
 			security_release_secctx(secctx, secctx_len);
 		}
-		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+		audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0)
-		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
-	return ret_val;
+	if (entry == NULL)
+		return -ENOENT;
+
+	call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
+	return 0;
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
···
 				  const struct in6_addr *mask,
 				  struct netlbl_audit *audit_info)
 {
-	int ret_val = 0;
 	struct netlbl_af6list *list_entry;
 	struct netlbl_unlhsh_addr6 *entry;
 	struct audit_buffer *audit_buf;
···
 	if (list_entry != NULL)
 		entry = netlbl_unlhsh_addr6_entry(list_entry);
 	else
-		ret_val = -ENOENT;
+		entry = NULL;
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
···
 					  addr, mask);
 		if (dev != NULL)
 			dev_put(dev);
-		if (entry && security_secid_to_secctx(entry->secid,
-				&secctx,
-				&secctx_len) == 0) {
+		if (entry != NULL &&
+		    security_secid_to_secctx(entry->secid,
+					     &secctx, &secctx_len) == 0) {
 			audit_log_format(audit_buf, " sec_obj=%s", secctx);
 			security_release_secctx(secctx, secctx_len);
 		}
-		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+		audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0)
-		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
-	return ret_val;
+	if (entry == NULL)
+		return -ENOENT;
+
+	call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
+	return 0;
 }
 #endif /* IPv6 */
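
The bug pattern here: on a lookup miss the old code set only ret_val, leaving entry uninitialized, yet the audit block still evaluated "entry && ..." on the garbage pointer. The rewrite initializes entry to NULL and keys the audit record, the error return, and the call_rcu() all off the pointer itself. A userspace sketch of the corrected flow; lookup() and remove_addr() are stand-ins, not the netlabel API:

	#include <stdio.h>
	#include <stddef.h>

	struct entry { int secid; };

	static struct entry *lookup(int present)
	{
		static struct entry e = { .secid = 42 };
		return present ? &e : NULL;
	}

	static int remove_addr(int present)
	{
		struct entry *entry = lookup(present); /* NULL when absent */

		/* audit record can safely test entry != NULL */
		printf("res=%u\n", entry != NULL ? 1 : 0);

		if (entry == NULL)
			return -2; /* -ENOENT in the kernel */

		/* call_rcu(&entry->rcu, ...) would go here */
		return 0;
	}

	int main(void)
	{
		remove_addr(1);
		remove_addr(0);
		return 0;
	}
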
+22 -5
net/phonet/pep-gprs.c
···
 static void gprs_write_space(struct sock *sk)
 {
 	struct gprs_dev *dev = sk->sk_user_data;
+	struct net_device *net = dev->net;
 	unsigned credits = pep_writeable(sk);
 
 	spin_lock_bh(&dev->tx_lock);
 	dev->tx_max = credits;
-	if (credits > skb_queue_len(&dev->tx_queue))
-		netif_wake_queue(dev->net);
+	if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net))
+		netif_wake_queue(net);
 	spin_unlock_bh(&dev->tx_lock);
 }
 
 /*
  * Network device callbacks
  */
+
+static int gprs_open(struct net_device *dev)
+{
+	struct gprs_dev *gp = netdev_priv(dev);
+
+	gprs_write_space(gp->sk);
+	return 0;
+}
+
+static int gprs_close(struct net_device *dev)
+{
+	struct gprs_dev *gp = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	flush_work(&gp->tx_work);
+	return 0;
+}
 
 static int gprs_xmit(struct sk_buff *skb, struct net_device *net)
 {
···
 	net->tx_queue_len = 10;
 
 	net->destructor = free_netdev;
+	net->open = gprs_open;
+	net->stop = gprs_close;
 	net->hard_start_xmit = gprs_xmit; /* mandatory */
 	net->change_mtu = gprs_set_mtu;
 	net->get_stats = gprs_get_stats;
···
 	dev->sk = sk;
 
 	printk(KERN_DEBUG"%s: attached\n", net->name);
-	gprs_write_space(sk); /* kick off TX */
 	return net->ifindex;
 
 out_rel:
···
 
 	printk(KERN_DEBUG"%s: detached\n", net->name);
 	unregister_netdev(net);
-	flush_scheduled_work();
 	sock_put(sk);
-	skb_queue_purge(&dev->tx_queue);
 }
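
The Phonet fix wires up open/stop handlers so the TX queue state tracks the interface state: the flow-control callback now refuses to wake the queue unless netif_running(), and close stops the queue before flushing the transmit work. A boolean-level sketch of that gating; the struct fields and helpers below mimic, but are not, the kernel's:

	#include <stdbool.h>
	#include <stdio.h>

	struct net_device { bool running; bool queue_stopped; };

	static void netif_wake_queue(struct net_device *d) { d->queue_stopped = false; }
	static bool netif_running(const struct net_device *d) { return d->running; }

	/* wake the TX queue only while the interface is up */
	static void gprs_write_space(struct net_device *net, int credits, int queued)
	{
		if (credits > queued && netif_running(net))
			netif_wake_queue(net);
	}

	int main(void)
	{
		struct net_device net = { .running = false, .queue_stopped = true };

		gprs_write_space(&net, 4, 1);
		printf("stopped while down: %d\n", net.queue_stopped); /* 1 */

		net.running = true; /* the open handler would run here */
		gprs_write_space(&net, 4, 1);
		printf("stopped while up: %d\n", net.queue_stopped);   /* 0 */
		return 0;
	}
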
-3
net/sched/sch_netem.c
···
 	layering other disciplines. It does not need to do bandwidth
 	control either since that can be handled by using token
 	bucket or other rate control.
-
-	The simulator is limited by the Linux timer resolution
-	and will create packet bursts on the HZ boundary (1ms).
 */
 
 struct netem_sched_data {