/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"


static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
	priv->vlgrp = grp;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (!priv->vlgrp)
		return;

	en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
	       vid, vlan_group_get_device(priv->vlgrp, vid));

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (!priv->vlgrp)
		return;

	en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
	       vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
	vlan_group_set_device(priv->vlgrp, vid, NULL);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}
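/*
 * Fold the six MAC address bytes into a u64, with addr[0] ending up in
 * the most significant used byte; this packed form is what
 * mlx4_register_mac() and the MCAST_FLTR commands below consume.
 */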
static u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
		err = mlx4_register_mac(mdev->dev, priv->port,
					priv->mac, &priv->mac_index);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct dev_mc_list *plist = priv->mc_list;
	struct dev_mc_list *next;

	while (plist) {
		next = plist->next;
		kfree(plist);
		plist = next;
	}
	priv->mc_list = NULL;
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct dev_mc_list *mclist;
	struct dev_mc_list *tmp;
	struct dev_mc_list *plist = NULL;

	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
		if (!tmp) {
			en_err(priv, "failed to allocate multicast list\n");
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp, mclist, sizeof(struct dev_mc_list));
		tmp->next = NULL;
		if (plist)
			plist->next = tmp;
		else
			priv->mc_list = tmp;
		plist = tmp;
	}
}


static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct dev_mc_list *mclist;
	u64 mcast_addr = 0;
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring multicast change.\n");
		goto out;
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 1);
			if (err)
				en_err(priv, "Failed enabling "
					     "promiscuous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	} else {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		mlx4_en_clear_list(dev);
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
	       "rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
		cq->moder_time = MLX4_EN_TX_COAL_TIME;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_time = MLX4_EN_AUTO_CONF;
	priv->last_moder_jiffies = 0;
	priv->last_moder_packets = 0;
	priv->last_moder_tx_packets = 0;
	priv->last_moder_bytes = 0;
}
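/*
 * Periodically adapt the RX interrupt moderation time to the sampled
 * traffic pattern: TX-dominated small-packet traffic is assumed latency
 * sensitive and gets rx_usecs_low, RX-dominated bulk traffic gets
 * maximum coalescing (rx_usecs_high), rates in between are interpolated
 * linearly between the two bounds, and low overall packet rates fall
 * back to the default rx_usecs.
 */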
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_byte_diff;
	unsigned long tx_packets;
	unsigned long tx_pkt_diff;
	unsigned long rx_pkt_diff;
	int moder_time;
	int i, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	spin_lock_bh(&priv->stats_lock);
	rx_packets = priv->stats.rx_packets;
	rx_bytes = priv->stats.rx_bytes;
	tx_packets = priv->stats.tx_packets;
	spin_unlock_bh(&priv->stats_lock);

	if (!priv->last_moder_jiffies || !period)
		goto out;

	tx_pkt_diff = ((unsigned long) (tx_packets -
					priv->last_moder_tx_packets));
	rx_pkt_diff = ((unsigned long) (rx_packets -
					priv->last_moder_packets));
	packets = max(tx_pkt_diff, rx_pkt_diff);
	rx_byte_diff = rx_bytes - priv->last_moder_bytes;
	rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
	rate = packets * HZ / period;
	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				  priv->last_moder_bytes)) / packets : 0;

	/* Apply auto-moderation only when the packet rate is high enough
	 * for it to matter */
	if (rate > MLX4_EN_RX_RATE_THRESH) {
		/* If tx and rx packet rates are not balanced, assume that
		 * traffic is mainly BW bound and apply maximum moderation.
		 * Otherwise, moderate according to packet rate */
		if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
		    rx_pkt_diff / rx_byte_diff <
		    MLX4_EN_SMALL_PKT_SIZE)
			moder_time = priv->rx_usecs_low;
		else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
			moder_time = priv->rx_usecs_high;
		else {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		}
	} else {
		/* When packet rate is low, use default moderation rather than
		 * 0 to prevent interrupt storms if traffic suddenly increases */
		moder_time = priv->rx_usecs;
	}

	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
	       tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);

	en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
	       "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
	       priv->last_moder_time, moder_time, period, packets,
	       avg_pkt_size, rate);

	if (moder_time != priv->last_moder_time) {
		priv->last_moder_time = moder_time;
		for (i = 0; i < priv->rx_ring_num; i++) {
			cq = &priv->rx_cq[i];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				en_err(priv, "Failed modifying moderation for cq:%d\n", i);
				break;
			}
		}
	}

out:
	priv->last_moder_packets = rx_packets;
	priv->last_moder_tx_packets = tx_packets;
	priv->last_moder_bytes = rx_bytes;
	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_dbg(LINK, priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_dbg(LINK, priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

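/*
 * Bring the port up: activate the RX rings and CQs, set up RSS
 * steering, activate the TX CQs and rings, apply the port
 * configuration (MTU, pause/PPP, default qp, MAC), and finally issue
 * INIT_PORT. Resources are unwound in reverse order on any failure.
 */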
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto cq_err;
	}

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
		       "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}
	/* Set port mac number */
	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
	err = mlx4_register_mac(mdev->dev, priv->port,
				priv->mac, &priv->mac_index);
	if (err) {
		en_err(priv, "Failed setting port mac\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto mac_err;
	}

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

mac_err:
	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}

	mlx4_en_release_rss_steer(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}

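/*
 * Tear the port down in roughly the reverse order of
 * mlx4_en_start_port(): stop the TX queues, CLOSE_PORT, unregister the
 * MAC, deactivate and drain the TX rings, release RSS steering, and
 * finally the RX rings, waiting for any NAPI poll still scheduled on
 * an RX CQ to finish before deactivating it.
 */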
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* close port */
	priv->port_up = false;
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Unregister Mac address for the port */
	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}


static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and performance counters */
	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
	}

	mlx4_en_set_default_moderation(priv);
	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}


static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

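/*
 * Destroy the TX/RX rings and their CQs. Each entry is checked before
 * it is destroyed, so this is safe to call on a partially allocated
 * state (e.g. via mlx4_en_destroy_netdev() after an allocation
 * failure in mlx4_en_init_netdev()).
 */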
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}


void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);
	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			mlx4_en_set_default_moderation(priv);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_multicast_list	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_register	= mlx4_en_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
};

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
	if (dev == NULL) {
		mlx4_err(mdev, "Net device allocation failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->rx_csum = 1;
	priv->flags = prof->flags;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mc_list = NULL;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[ETH_ALEN - 1 - i] =
			(u8) (priv->mac >> (8 * i));

	/*
	 * Set driver features
	 */
	dev->features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_HW_VLAN_TX |
			 NETIF_F_HW_VLAN_RX |
			 NETIF_F_HW_VLAN_FILTER;
	if (mdev->profile.num_lro)
		dev->features |= NETIF_F_LRO;
	if (mdev->LSO_support) {
		dev->features |= NETIF_F_TSO;
		dev->features |= NETIF_F_TSO6;
		dev->vlan_features |= NETIF_F_TSO;
		dev->vlan_features |= NETIF_F_TSO6;
	}

%d\n", port); 1063 goto out; 1064 } 1065 1066 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 1067 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 1068 1069 priv->registered = 1; 1070 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1071 return 0; 1072 1073out: 1074 mlx4_en_destroy_netdev(dev); 1075 return err; 1076} 1077