Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
bnx2x: allow device to properly initialize after hotplug
bnx2x: fix DMAE timeout according to hw specifications
bnx2x: properly handle CFC DEL in cnic flow
bnx2x: call dev_kfree_skb_any instead of dev_kfree_skb
net: filter: move forward declarations to avoid compile warnings
pktgen: refactor pg_init() code
pktgen: use vzalloc_node() instead of vmalloc_node() + memset()
net: skb_trim explicitly check the linearity instead of data_len
ipv4: Give backtrace in ip_rt_bug().
net: avoid synchronize_rcu() in dev_deactivate_many
net: remove synchronize_net() from netdev_set_master()
rtnetlink: ignore NETDEV_RELEASE and NETDEV_JOIN event
net: rename NETDEV_BONDING_DESLAVE to NETDEV_RELEASE
bridge: call NETDEV_JOIN notifiers when adding a slave
netpoll: disable netpoll when enslaving a device
macvlan: Forward unicast frames in bridge mode to lowerdev
net: Remove linux/prefetch.h include from linux/skbuff.h
ipv4: Include linux/prefetch.h in fib_trie.c
netlabel: Remove prefetches from list handlers.
drivers/net: add prefetch header for prefetch users
...

Fixed up the prefetch parts: removed a few duplicate prefetch.h includes,
fixed the location of the igb prefetch.h include, and took my version of
the skbuff.h code without the extra parentheses, etc.

22 files changed, +217 -157
+2 -2
drivers/net/bnx2x/bnx2x_cmn.c
···
  
      /* release skb */
      WARN_ON(!skb);
-     dev_kfree_skb(skb);
+     dev_kfree_skb_any(skb);
      tx_buf->first_bd = 0;
      tx_buf->skb = NULL;
  
···
      } else {
          DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
             " - dropping packet!\n");
-         dev_kfree_skb(skb);
+         dev_kfree_skb_any(skb);
      }
  
  
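
The dev_kfree_skb() to dev_kfree_skb_any() switch matters because these bnx2x
paths can run with interrupts disabled (for instance via netpoll), where plain
dev_kfree_skb() is unsafe. The _any variant picks a safe strategy at runtime;
its logic in net/core/dev.c of this kernel is roughly:

    void dev_kfree_skb_any(struct sk_buff *skb)
    {
        if (in_irq() || irqs_disabled())
            dev_kfree_skb_irq(skb);   /* defer the free to softirq */
        else
            dev_kfree_skb(skb);       /* safe to free right here */
    }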
+1 -1
drivers/net/bnx2x/bnx2x_cmn.h
···
      mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
                               DMA_FROM_DEVICE);
      if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-         dev_kfree_skb(skb);
+         dev_kfree_skb_any(skb);
          return -ENOMEM;
      }
  
+28 -46
drivers/net/bnx2x/bnx2x_main.c
···
                             struct dmae_command *dmae)
  {
      u32 *wb_comp = bnx2x_sp(bp, wb_comp);
-     int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
+     int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
      int rc = 0;
  
      DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
···
                             union event_ring_elem *elem)
  {
      if (!bp->cnic_eth_dev.starting_cid ||
-         cid < bp->cnic_eth_dev.starting_cid)
+         (cid < bp->cnic_eth_dev.starting_cid &&
+          cid != bp->cnic_eth_dev.iscsi_l2_cid))
          return 1;
  
      DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
···
      msleep(MCP_ONE_TIMEOUT);
  }
  
- static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+ /*
+  * initializes bp->common.shmem_base and waits for validity signature to appear
+  */
+ static int bnx2x_init_shmem(struct bnx2x *bp)
  {
-     u32 shmem, cnt, validity_offset, val;
-     int rc = 0;
+     int cnt = 0;
+     u32 val = 0;
  
-     msleep(100);
- 
-     /* Get shmem offset */
-     shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-     if (shmem == 0) {
-         BNX2X_ERR("Shmem 0 return failure\n");
-         rc = -ENOTTY;
-         goto exit_lbl;
-     }
- 
-     validity_offset = offsetof(struct shmem_region, validity_map[0]);
- 
-     /* Wait for MCP to come up */
-     for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
-         /* TBD: its best to check validity map of last port.
-          * currently checks on port 0.
-          */
-         val = REG_RD(bp, shmem + validity_offset);
-         DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
-            shmem + validity_offset, val);
- 
-         /* check that shared memory is valid. */
-         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-             == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-             break;
+     do {
+         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+         if (bp->common.shmem_base) {
+             val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+             if (val & SHR_MEM_VALIDITY_MB)
+                 return 0;
+         }
  
          bnx2x_mcp_wait_one(bp);
-     }
  
-     DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+     } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
  
-     /* Check that shared memory is valid. This indicates that MCP is up. */
-     if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
-         (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
-         BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
-         rc = -ENOTTY;
-         goto exit_lbl;
-     }
+     BNX2X_ERR("BAD MCP validity signature\n");
  
- exit_lbl:
+     return -ENODEV;
+ }
+ 
+ static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+ {
+     int rc = bnx2x_init_shmem(bp);
+ 
      /* Restore the `magic' bit value */
      if (!CHIP_IS_E1(bp))
          bnx2x_clp_reset_done(bp, magic_val);
···
      BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
                     bp->common.flash_size, bp->common.flash_size);
  
-     bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+     bnx2x_init_shmem(bp);
+ 
      bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
                                           MISC_REG_GENERIC_CR_1 :
                                           MISC_REG_GENERIC_CR_0));
+ 
      bp->link_params.shmem_base = bp->common.shmem_base;
      bp->link_params.shmem2_base = bp->common.shmem2_base;
      BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
···
          bp->flags |= NO_MCP_FLAG;
          return;
      }
- 
-     val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
-     if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-         != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-         BNX2X_ERR("BAD MCP validity signature\n");
  
      bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
      BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+3 -1
drivers/net/bonding/bond_main.c
···
          }
      }
  
+     call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
+ 
      /* If this is the first slave, then we need to set the master's hardware
       * address to be the same as the slave's. */
      if (is_zero_ether_addr(bond->dev->dev_addr))
···
      }
  
      block_netpoll_tx();
-     netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
+     netdev_bonding_change(bond_dev, NETDEV_RELEASE);
      write_lock_bh(&bond->lock);
  
      slave = bond_get_slave_by_dev(bond, slave_dev);
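
Any subsystem can watch for these enslave/release transitions through the
netdevice notifier chain. A minimal sketch (the handler and block names are
hypothetical; in this kernel generation the notifier's ptr argument is the
struct net_device itself):

    static int demo_netdev_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_JOIN:       /* dev is about to be enslaved */
            pr_info("%s: joining a master\n", dev->name);
            break;
        case NETDEV_RELEASE:    /* dev released its slaves */
            pr_info("%s: released slaves\n", dev->name);
            break;
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block demo_nb = {
        .notifier_call = demo_netdev_event,
    };

    /* somewhere in init: register_netdevice_notifier(&demo_nb); */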
+2 -4
drivers/net/macvlan.c
···
  
      dest = macvlan_hash_lookup(port, eth->h_dest);
      if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
-         unsigned int length = skb->len + ETH_HLEN;
-         int ret = dest->forward(dest->dev, skb);
-         macvlan_count_rx(dest, length,
-                          ret == NET_RX_SUCCESS, 0);
+         /* send to lowerdev first for its network taps */
+         vlan->forward(vlan->lowerdev, skb);
  
          return NET_XMIT_SUCCESS;
      }
+17 -9
drivers/net/netconsole.c
···
      bool stopped = false;
  
      if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
-           event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
+           event == NETDEV_RELEASE || event == NETDEV_JOIN))
          goto done;
  
      spin_lock_irqsave(&target_list_lock, flags);
- restart:
      list_for_each_entry(nt, &target_list, list) {
          netconsole_target_get(nt);
          if (nt->np.dev == dev) {
···
              case NETDEV_CHANGENAME:
                  strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
                  break;
+             case NETDEV_RELEASE:
+             case NETDEV_JOIN:
              case NETDEV_UNREGISTER:
                  /*
                   * rtnl_lock already held
···
                  dev_put(nt->np.dev);
                  nt->np.dev = NULL;
                  netconsole_target_put(nt);
-                 goto restart;
              }
-             /* Fall through */
-         case NETDEV_GOING_DOWN:
-         case NETDEV_BONDING_DESLAVE:
              nt->enabled = 0;
              stopped = true;
              break;
···
          netconsole_target_put(nt);
      }
      spin_unlock_irqrestore(&target_list_lock, flags);
-     if (stopped && (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE))
+     if (stopped) {
          printk(KERN_INFO "netconsole: network logging stopped on "
-                "interface %s as it %s\n", dev->name,
-                event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
+                "interface %s as it ", dev->name);
+         switch (event) {
+         case NETDEV_UNREGISTER:
+             printk(KERN_CONT "unregistered\n");
+             break;
+         case NETDEV_RELEASE:
+             printk(KERN_CONT "released slaves\n");
+             break;
+         case NETDEV_JOIN:
+             printk(KERN_CONT "is joining a master device\n");
+             break;
+         }
+     }
  
  done:
      return NOTIFY_DONE;
+2 -2
drivers/net/rionet.c
···
      rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
  
      if (netif_msg_tx_queued(rnet))
-         printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
-                (u32) skb, skb->len);
+         printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
+                skb->len);
  
      return 0;
  }
+4 -3
include/linux/filter.h
···
  #define SKF_LL_OFF    (-0x200000)
  
  #ifdef __KERNEL__
+ 
+ struct sk_buff;
+ struct sock;
+ 
  struct sk_filter
  {
      atomic_t refcnt;
···
  {
      return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
  }
- 
- struct sk_buff;
- struct sock;
  
  extern int sk_filter(struct sock *sk, struct sk_buff *skb);
  extern unsigned int sk_run_filter(const struct sk_buff *skb,
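
The reordering fixes a C scoping subtlety: if a struct tag first appears
inside a function prototype's parameter list, GCC warns and gives the tag a
scope limited to that one declaration. A stand-alone illustration (the tag
name here is made up, not kernel code):

    /* warning: 'struct demo_pkt' declared inside parameter list --
     * the type is scoped to this one prototype only */
    int bad_filter(struct demo_pkt *p);

    /* forward-declaring the tag at file scope first makes every
     * later use refer to one compatible type, with no warning */
    struct demo_pkt;
    int good_filter(struct demo_pkt *p);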
+2 -1
include/linux/notifier.h
···
  #define NETDEV_POST_TYPE_CHANGE 0x000F
  #define NETDEV_POST_INIT        0x0010
  #define NETDEV_UNREGISTER_BATCH 0x0011
- #define NETDEV_BONDING_DESLAVE  0x0012
+ #define NETDEV_RELEASE          0x0012
  #define NETDEV_NOTIFY_PEERS     0x0013
+ #define NETDEV_JOIN             0x0014
  
  #define SYS_DOWN    0x0001  /* Notify of system down */
  #define SYS_RESTART SYS_DOWN
+1 -1
include/linux/skbuff.h
···
  
  static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
  {
-     if (unlikely(skb->data_len)) {
+     if (unlikely(skb_is_nonlinear(skb))) {
          WARN_ON(1);
          return;
      }
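
The two tests are equivalent; skb_is_nonlinear() just states the intent. The
existing helper in include/linux/skbuff.h reads roughly:

    /* an skb is nonlinear iff part of its data lives in fragments
     * rather than in the linear header, i.e. data_len != 0 */
    static inline int skb_is_nonlinear(const struct sk_buff *skb)
    {
        return skb->data_len;
    }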
+21 -15
include/net/caif/caif_layer.h
···
  struct caif_payload_info;
  struct caif_packet_funcs;
  
- 
  #define CAIF_LAYER_NAME_SZ 16
  
  /**
···
          WARN_ON(!(assert));     \
      }                           \
  } while (0)
- 
  
  /**
   * enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd().
···
   * - All layers must use this structure. If embedding it, then place this
   *   structure first in the layer specific structure.
   *
-  * - Each layer should not depend on any others layer private data.
+  * - Each layer should not depend on any other layer's private data.
   *
   * - In order to send data upwards do
   *   layer->up->receive(layer->up, packet);
···
      struct list_head node;
  
      /*
-      * receive() - Receive Function.
+      * receive() - Receive Function (non-blocking).
       * Contract: Each layer must implement a receive function passing the
       * CAIF packets upwards in the stack.
       *  Packet handling rules:
-      *        - The CAIF packet (cfpkt) cannot be accessed after
-      *          passing it to the next layer using up->receive().
+      *        - The CAIF packet (cfpkt) ownership is passed to the
+      *          called receive function. This means that the
+      *          packet cannot be accessed after passing it to the
+      *          above layer using up->receive().
+      *
       *        - If parsing of the packet fails, the packet must be
-      *          destroyed and -1 returned from the function.
+      *          destroyed and a negative error code returned
+      *          from the function.
+      *          EXCEPTION: If the framing layer (cffrml) returns
+      *          -EILSEQ, the packet is not freed.
+      *
       *        - If parsing succeeds (and above layers return OK) then
-      *          the function must return a value > 0.
+      *          the function must return a value >= 0.
       *
       *  Returns result < 0 indicates an error, 0 or positive value
       *  indicates success.
···
      int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
  
      /*
-      * transmit() - Transmit Function.
+      * transmit() - Transmit Function (non-blocking).
       * Contract: Each layer must implement a transmit function passing the
       * CAIF packet downwards in the stack.
       *  Packet handling rules:
···
       *          cannot be accessed after passing it to the below
       *          layer using dn->transmit().
       *
-      *        - If transmit fails, however, the ownership is returned
-      *          to thecaller. The caller of "dn->transmit()" must
-      *          destroy or resend packet.
+      *        - Upon error the packet ownership is still passed on,
+      *          so the packet shall be freed where error is detected.
+      *          Callers of the transmit function shall not free packets,
+      *          but errors shall be returned.
       *
       *        - Return value less than zero means error, zero or
       *          greater than zero means OK.
       *
-      *  result < 0 indicates an error, 0 or positive value
-      *  indicate success.
+      *  Returns result < 0 indicates an error, 0 or positive value
+      *  indicates success.
       *
       *  @layr: Pointer to the current layer the receive function
       *  isimplemented for (this pointer).
···
      int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
  
      /*
-      * cttrlcmd() - Control Function upwards in CAIF Stack.
+      * ctrlcmd() - Control Function upwards in CAIF Stack (non-blocking).
       * Used for signaling responses (CAIF_CTRLCMD_*_RSP)
       * and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
       *
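
Under the documented ownership rules, a conforming receive() looks roughly
like this sketch (demo_parse_ok() and the layer name are hypothetical;
cfpkt_destroy() is CAIF's real packet destructor):

    static int demo_receive(struct cflayer *layr, struct cfpkt *pkt)
    {
        if (!demo_parse_ok(pkt)) {      /* hypothetical parser */
            cfpkt_destroy(pkt);         /* on error we still own it */
            return -EPROTO;
        }
        /* on success, ownership moves to the layer above */
        return layr->up->receive(layr->up, pkt);
    }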
+3
net/bridge/br_if.c
···
      dev->priv_flags &= ~IFF_BRIDGE_PORT;
  
      netdev_rx_handler_unregister(dev);
+     synchronize_net();
  
      netdev_set_master(dev, NULL);
  
···
      p = new_nbp(br, dev);
      if (IS_ERR(p))
          return PTR_ERR(p);
+ 
+     call_netdevice_notifiers(NETDEV_JOIN, dev);
  
      err = dev_set_promiscuity(dev, 1);
      if (err)
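
The added synchronize_net() closes a teardown race: receive processing
dereferences dev->rx_handler under rcu_read_lock(), so after unregistering
the handler the bridge must wait out one RCU grace period before the per-port
state that br_handle_frame() reads goes away. The general unpublish-then-wait
pattern, as a sketch (the priv free stands in for the bridge's actual port
teardown):

    rcu_assign_pointer(dev->rx_handler, NULL);      /* what unregister does */
    rcu_assign_pointer(dev->rx_handler_data, NULL);
    synchronize_net();   /* wait for readers inside rcu_read_lock() */
    kfree(priv);         /* the old state is now unreachable */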
+6 -1
net/caif/caif_dev.c
···
  {
      struct cfpkt *pkt;
      struct caif_device_entry *caifd;
+     int err;
  
      pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
  
···
      caifd_hold(caifd);
      rcu_read_unlock();
  
-     caifd->layer.up->receive(caifd->layer.up, pkt);
+     err = caifd->layer.up->receive(caifd->layer.up, pkt);
+ 
+     /* For -EILSEQ the packet is not freed, so free it now */
+     if (err == -EILSEQ)
+         cfpkt_destroy(pkt);
  
      /* Release reference to stack upwards */
      caifd_put(caifd);
+5 -8
net/caif/caif_socket.c
···
  #include <linux/uaccess.h>
  #include <linux/debugfs.h>
  #include <linux/caif/caif_socket.h>
- #include <asm/atomic.h>
+ #include <linux/atomic.h>
  #include <net/sock.h>
  #include <net/tcp_states.h>
  #include <net/caif/caif_layer.h>
···
          if (sk->sk_shutdown & SHUTDOWN_MASK) {
              /* Allow re-connect after SHUTDOWN_IND */
              caif_disconnect_client(sock_net(sk), &cf_sk->layer);
+             caif_free_client(&cf_sk->layer);
              break;
          }
          /* No reconnect on a seqpacket socket */
···
  {
      struct sock *sk = sock->sk;
      struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-     int res = 0;
  
      if (!sk)
          return 0;
···
      sk->sk_state = CAIF_DISCONNECTED;
      sk->sk_shutdown = SHUTDOWN_MASK;
  
-     if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
-         cf_sk->sk.sk_socket->state == SS_CONNECTING)
-         res = caif_disconnect_client(sock_net(sk), &cf_sk->layer);
- 
+     caif_disconnect_client(sock_net(sk), &cf_sk->layer);
      cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
      wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
  
···
      sk_stream_kill_queues(&cf_sk->sk);
      release_sock(sk);
      sock_put(sk);
-     return res;
+     return 0;
  }
  
  /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
···
      set_rx_flow_on(cf_sk);
  
      /* Set default options on configuration */
-     cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
+     cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
      cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
      cf_sk->conn_req.protocol = protocol;
      /* Increase the number of sockets created. */
+19 -25
net/caif/cfcnfg.c
···
  
  int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
  {
-     u8 channel_id = 0;
-     int ret = 0;
-     struct cflayer *servl = NULL;
+     u8 channel_id;
      struct cfcnfg *cfg = get_cfcnfg(net);
  
      caif_assert(adap_layer != NULL);
- 
-     channel_id = adap_layer->id;
-     if (adap_layer->dn == NULL || channel_id == 0) {
-         pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
-         ret = -ENOTCONN;
-         goto end;
-     }
- 
-     servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
-     if (servl == NULL) {
-         pr_err("PROTOCOL ERROR - "
-                "Error removing service_layer Channel_Id(%d)",
-                channel_id);
-         ret = -EINVAL;
-         goto end;
-     }
- 
-     ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
- 
- end:
      cfctrl_cancel_req(cfg->ctrl, adap_layer);
+     channel_id = adap_layer->id;
+     if (channel_id != 0) {
+         struct cflayer *servl;
+         servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+         if (servl != NULL)
+             layer_set_up(servl, NULL);
+     } else
+         pr_debug("nothing to disconnect\n");
+     cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
  
      /* Do RCU sync before initiating cleanup */
      synchronize_rcu();
      if (adap_layer->ctrlcmd != NULL)
          adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
-     return ret;
+     return 0;
  
  }
  EXPORT_SYMBOL(caif_disconnect_client);
···
      struct cfcnfg_phyinfo *phyinfo;
      struct net_device *netdev;
  
+     if (channel_id == 0) {
+         pr_warn("received channel_id zero\n");
+         if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
+             adapt_layer->ctrlcmd(adapt_layer,
+                                  CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
+         return;
+     }
+ 
      rcu_read_lock();
  
      if (adapt_layer == NULL) {
···
      phyinfo->use_stx = stx;
      phyinfo->use_fcs = fcs;
  
-     phy_layer->type = phy_type;
      frml = cffrml_create(phyid, fcs);
  
      if (!frml) {
+31 -13
net/caif/cfctrl.c
···
  void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
  {
      struct cfctrl *cfctrl = container_obj(layer);
-     int ret;
      struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+     struct cflayer *dn = cfctrl->serv.layer.dn;
      if (!pkt) {
          pr_warn("Out of memory\n");
+         return;
+     }
+     if (!dn) {
+         pr_debug("not able to send enum request\n");
          return;
      }
      caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
···
      cfctrl->serv.dev_info.id = physlinkid;
      cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
      cfpkt_addbdy(pkt, physlinkid);
-     ret =
-         cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+     dn->transmit(dn, pkt);
  }
  
  int cfctrl_linkup_request(struct cflayer *layer,
···
      int ret;
      char utility_name[16];
      struct cfpkt *pkt;
+     struct cflayer *dn = cfctrl->serv.layer.dn;
+ 
+     if (!dn) {
+         pr_debug("not able to send linkup request\n");
+         return -ENODEV;
+     }
  
      if (cfctrl_cancel_req(layer, user_layer) > 0) {
          /* Slight Paranoia, check if already connecting */
···
       */
      cfpkt_info(pkt)->dev_info->id = param->phyid;
      ret =
-         cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+         dn->transmit(dn, pkt);
      if (ret < 0) {
          int count;
  
···
      int ret;
      struct cfctrl *cfctrl = container_obj(layer);
      struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+     struct cflayer *dn = cfctrl->serv.layer.dn;
+ 
      if (!pkt) {
          pr_warn("Out of memory\n");
          return -ENOMEM;
      }
+ 
+     if (!dn) {
+         pr_debug("not able to send link-down request\n");
+         return -ENODEV;
+     }
+ 
      cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
      cfpkt_addbdy(pkt, channelid);
      init_info(cfpkt_info(pkt), cfctrl);
      ret =
-         cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+         dn->transmit(dn, pkt);
  #ifndef CAIF_NO_LOOP
      cfctrl->loop_linkused[channelid] = 0;
  #endif
···
      cfpkt_extr_head(pkt, &cmdrsp, 1);
      cmd = cmdrsp & CFCTRL_CMD_MASK;
      if (cmd != CFCTRL_CMD_LINK_ERR
-         && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
+         && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
+         && CFCTRL_ERR_BIT != (CFCTRL_ERR_BIT & cmdrsp)) {
          if (handle_loop(cfctrl, cmd, pkt) != 0)
              cmdrsp |= CFCTRL_ERR_BIT;
      }
···
              cfpkt_extr_head(pkt, &param, len);
              break;
          default:
-             pr_warn("Request setup - invalid link type (%d)\n",
+             pr_warn("Request setup, invalid type (%d)\n",
                      serv);
              goto error;
  
···
  
      if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
          cfpkt_erroneous(pkt)) {
-         pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
+         pr_err("Invalid O/E bit or parse error "
+                "on CAIF control channel\n");
          cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
                                 0,
                                 req ? req->client_layer
···
      case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
      case CAIF_CTRLCMD_FLOW_OFF_IND:
          spin_lock_bh(&this->info_list_lock);
-         if (!list_empty(&this->list)) {
+         if (!list_empty(&this->list))
              pr_debug("Received flow off in control layer\n");
-         }
          spin_unlock_bh(&this->info_list_lock);
          break;
      case _CAIF_CTRLCMD_PHYIF_DOWN_IND: {
···
      case CFCTRL_CMD_LINK_SETUP:
          spin_lock_bh(&ctrl->loop_linkid_lock);
          if (!dec) {
-             for (linkid = last_linkid + 1; linkid < 255; linkid++)
+             for (linkid = last_linkid + 1; linkid < 254; linkid++)
                  if (!ctrl->loop_linkused[linkid])
                      goto found;
          }
          dec = 1;
-         for (linkid = last_linkid - 1; linkid > 0; linkid--)
+         for (linkid = last_linkid - 1; linkid > 1; linkid--)
              if (!ctrl->loop_linkused[linkid])
                  goto found;
          spin_unlock_bh(&ctrl->loop_linkid_lock);
- 
+         return -1;
  found:
          if (linkid < 10)
              dec = 0;
+38 -11
net/caif/cfmuxl.c
···
      return &this->layer;
  }
  
- int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
- {
-     struct cfmuxl *muxl = container_obj(layr);
- 
-     spin_lock_bh(&muxl->receive_lock);
-     list_add_rcu(&up->node, &muxl->srvl_list);
-     spin_unlock_bh(&muxl->receive_lock);
-     return 0;
- }
- 
  int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
  {
      struct cfmuxl *muxl = (struct cfmuxl *) layr;
···
      }
  
      return NULL;
+ }
+ 
+ int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
+ {
+     struct cfmuxl *muxl = container_obj(layr);
+     struct cflayer *old;
+ 
+     spin_lock_bh(&muxl->receive_lock);
+ 
+     /* Two entries with same id is wrong, so remove old layer from mux */
+     old = get_from_id(&muxl->srvl_list, linkid);
+     if (old != NULL)
+         list_del_rcu(&old->node);
+ 
+     list_add_rcu(&up->node, &muxl->srvl_list);
+     spin_unlock_bh(&muxl->receive_lock);
+ 
+     return 0;
  }
  
  struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
···
      struct cflayer *up;
      struct cfmuxl *muxl = container_obj(layr);
      int idx = id % UP_CACHE_SIZE;
+ 
+     if (id == 0) {
+         pr_warn("Trying to remove control layer\n");
+         return NULL;
+     }
  
      spin_lock_bh(&muxl->receive_lock);
      up = get_from_id(&muxl->srvl_list, id);
···
  {
      struct cfmuxl *muxl = container_obj(layr);
      struct cflayer *layer;
+     int idx;
  
      rcu_read_lock();
      list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
-         if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
+ 
+         if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
+ 
+             if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ||
+                  ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
+                 layer->id != 0) {
+ 
+                 idx = layer->id % UP_CACHE_SIZE;
+                 spin_lock_bh(&muxl->receive_lock);
+                 rcu_assign_pointer(muxl->up_cache[idx], NULL);
+                 list_del_rcu(&layer->node);
+                 spin_unlock_bh(&muxl->receive_lock);
+             }
              /* NOTE: ctrlcmd is not allowed to block */
              layer->ctrlcmd(layer, ctrl, phyid);
+         }
      }
      rcu_read_unlock();
  }
+1 -3
net/core/dev.c
···
  
      slave->master = master;
  
-     if (old) {
-         synchronize_net();
+     if (old)
          dev_put(old);
-     }
      return 0;
  }
  EXPORT_SYMBOL(netdev_set_master);
+13 -9
net/core/pktgen.c
···
          return -ENOMEM;
  
      strcpy(pkt_dev->odevname, ifname);
-     pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
+     pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
                                    node);
      if (pkt_dev->flows == NULL) {
          kfree(pkt_dev);
          return -ENOMEM;
      }
-     memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state));
  
      pkt_dev->removal_mark = 0;
      pkt_dev->min_pkt_size = ETH_ZLEN;
···
  {
      int cpu;
      struct proc_dir_entry *pe;
+     int ret = 0;
  
      pr_info("%s", version);
  
···
      pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
      if (pe == NULL) {
          pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
-         proc_net_remove(&init_net, PG_PROC_DIR);
-         return -EINVAL;
+         ret = -EINVAL;
+         goto remove_dir;
      }
  
-     /* Register us to receive netdevice events */
      register_netdevice_notifier(&pktgen_notifier_block);
  
      for_each_online_cpu(cpu) {
···
  
      if (list_empty(&pktgen_threads)) {
          pr_err("ERROR: Initialization failed for all threads\n");
-         unregister_netdevice_notifier(&pktgen_notifier_block);
-         remove_proc_entry(PGCTRL, pg_proc_dir);
-         proc_net_remove(&init_net, PG_PROC_DIR);
-         return -ENODEV;
+         ret = -ENODEV;
+         goto unregister;
      }
  
      return 0;
+ 
+ unregister:
+     unregister_netdevice_notifier(&pktgen_notifier_block);
+     remove_proc_entry(PGCTRL, pg_proc_dir);
+ remove_dir:
+     proc_net_remove(&init_net, PG_PROC_DIR);
+     return ret;
  }
  
  static void __exit pg_cleanup(void)
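
Two independent cleanups here: vzalloc_node() folds the vmalloc_node() +
memset() pair into one call, and pg_init() now unwinds failures with the
kernel's usual reverse-order goto labels instead of duplicating cleanup calls
on each error path. The idiom, reduced to a sketch with hypothetical
setup/teardown steps:

    static int __init demo_init(void)
    {
        int ret;

        if (setup_a())
            return -ENOMEM;         /* nothing to undo yet */
        if (setup_b()) {
            ret = -EINVAL;
            goto undo_a;
        }
        if (setup_c()) {
            ret = -ENODEV;
            goto undo_b;            /* undo in reverse order */
        }
        return 0;

    undo_b:
        teardown_b();
    undo_a:
        teardown_a();
        return ret;
    }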
+2
net/core/rtnetlink.c
···
      case NETDEV_GOING_DOWN:
      case NETDEV_UNREGISTER:
      case NETDEV_UNREGISTER_BATCH:
+     case NETDEV_RELEASE:
+     case NETDEV_JOIN:
          break;
      default:
          rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+1
net/ipv4/route.c
···
          &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
          skb->dev ? skb->dev->name : "?");
      kfree_skb(skb);
+     WARN_ON(1);
      return 0;
  }
  
+15 -2
net/sched/sch_generic.c
···
      return false;
  }
  
+ /**
+  * dev_deactivate_many - deactivate transmissions on several devices
+  * @head: list of devices to deactivate
+  *
+  * This function returns only when all outstanding transmissions
+  * have completed, unless all devices are in dismantle phase.
+  */
  void dev_deactivate_many(struct list_head *head)
  {
      struct net_device *dev;
+     bool sync_needed = false;
  
      list_for_each_entry(dev, head, unreg_list) {
          netdev_for_each_tx_queue(dev, dev_deactivate_queue,
···
                                   &noop_qdisc);
  
          dev_watchdog_down(dev);
+         sync_needed |= !dev->dismantle;
      }
  
-     /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
-     synchronize_rcu();
+     /* Wait for outstanding qdisc-less dev_queue_xmit calls.
+      * This is avoided if all devices are in dismantle phase :
+      * Caller will call synchronize_net() for us
+      */
+     if (sync_needed)
+         synchronize_net();
  
      /* Wait for outstanding qdisc_run calls. */
      list_for_each_entry(dev, head, unreg_list)
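
The switch from synchronize_rcu() to synchronize_net() is not itself the
optimization; in this kernel synchronize_net() is only a thin wrapper,
roughly:

    void synchronize_net(void)
    {
        might_sleep();
        synchronize_rcu();
    }

The win is the sync_needed test: when every device on the list is in the
dismantle phase, the grace period is skipped entirely, because the
unregistration path performs a single synchronize_net() that covers the whole
batch instead of paying one per device.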