Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
cnic.c at v2.6.39-rc3 (5430 lines, 142 kB)
/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
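
/*
 * Illustrative sketch, not part of cnic.c: the accessors above never touch
 * the hardware directly.  They fill in a struct drv_ctl_info and hand it to
 * the ethernet driver through ethdev->drv_ctl().  On the other side, a
 * handler might dispatch on info->cmd roughly as below; the command values
 * and the data.io layout are the ones used above, while the handler and
 * helper names are hypothetical.
 */
#if 0
static int example_drv_ctl(struct net_device *netdev,
			   struct drv_ctl_info *info)
{
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		/* indirect register write: offset and value in io */
		example_reg_wr_ind(netdev, io->offset, io->data);
		return 0;
	case DRV_CTL_IO_RD_CMD:
		/* indirect register read; result returned through io->data */
		io->data = example_reg_rd_ind(netdev, io->offset);
		return 0;
	case DRV_CTL_CTX_WR_CMD:
		/* context memory write keyed by io->cid_addr */
		example_ctx_wr(netdev, io->cid_addr, io->offset, io->data);
		return 0;
	default:
		return -EINVAL;
	}
}
#endif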

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				   !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
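
/*
 * Illustrative sketch, not part of cnic.c: a ULP driver (bnx2i, for
 * example) plugs into the cnic_ulp_tbl[] slot for its type with a
 * cnic_ulp_ops table.  Only callbacks that are actually invoked in the
 * code above are shown (cnic_init from cnic_register_driver, cnic_start
 * from cnic_register_device); the bodies and names here are hypothetical.
 */
#if 0
static void example_ulp_init(struct cnic_dev *dev)
{
	/* called once per cnic device when this ULP type registers */
}

static void example_ulp_start(void *ulp_ctx)
{
	/* called when the hardware is up and the device is registered */
}

static struct cnic_ulp_ops example_ulp_ops = {
	.cnic_init	= example_ulp_init,
	.cnic_start	= example_ulp_start,
	/* remaining callbacks omitted */
};

/* Typical module init/exit pairing:
 *	cnic_register_driver(CNIC_ULP_ISCSI, &example_ulp_ops);
 *	...
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 */
#endif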

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
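
/*
 * Illustrative sketch, not part of cnic.c: the functions above implement a
 * small bitmap ID allocator.  Note that cnic_alloc_new_id() advances its
 * search hint with "(id + 1) & (id_tbl->max - 1)", so the hint only wraps
 * cleanly when the table size is a power of two.  The size, base and ids
 * below are made up.
 */
#if 0
static int example_id_tbl_usage(void)
{
	struct cnic_id_tbl tbl;
	u32 id;

	if (cnic_init_id_tbl(&tbl, 256, 16))	/* ids 16..271 */
		return -ENOMEM;

	id = cnic_alloc_new_id(&tbl);		/* first call yields 16 */
	if (id != -1)
		cnic_free_id(&tbl, id);

	cnic_free_id_tbl(&tbl);
	return 0;
}
#endif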

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
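
/*
 * Worked example, not part of cnic.c: the two routines above differ only in
 * which 32-bit half of each 64-bit DMA address goes into the table first.
 * cnic_setup_page_tbl() stores the high half in the first little-endian
 * word, i.e. the words are in big-endian order even though each word stays
 * little-endian.  For a hypothetical dma_addr_t of 0x0000001234567890 the
 * eight table bytes come out as:
 *
 *	cnic_setup_page_tbl():    12 00 00 00  90 78 56 34  (hi word first)
 *	cnic_setup_page_tbl_le(): 90 78 56 34  12 00 00 00  (lo word first)
 */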

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
 err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
 err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
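
/*
 * Illustrative sketch, not part of cnic.c: cnic_init_uio() exports four
 * regions through UIO -- mem[0] the device BAR, mem[1] the status block,
 * mem[2] the L2 ring and mem[3] the L2 buffer.  Per standard UIO behaviour
 * a userspace client maps region N by mmap()ing the uio node at offset
 * N * page size.  The device path and the bare-bones error handling below
 * are hypothetical simplifications.
 */
#if 0	/* userspace example */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *example_map_l2_ring(size_t l2_ring_size)
{
	int fd = open("/dev/uio0", O_RDWR);	/* hypothetical node */
	void *ring;

	if (fd < 0)
		return NULL;

	/* offset 2 * pagesize selects mem[2], the L2 ring */
	ring = mmap(NULL, l2_ring_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 2 * sysconf(_SC_PAGESIZE));
	close(fd);	/* the mapping stays valid after close */
	return ring == MAP_FAILED ? NULL : ring;
}
#endif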

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
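
/*
 * Worked example, not part of cnic.c: cnic_kwq_avail() is standard
 * power-of-two ring arithmetic.  With a hypothetical max_kwq_idx of 127
 * (a 128-entry ring), kwq_prod_idx == 5 and kwq_con_idx == 120, the
 * in-flight count is (5 - 120) & 127 == 13, so 127 - 13 == 114 entries
 * are still free even though the producer index has wrapped.
 */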

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}


static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}
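
/*
 * Note, not part of cnic.c: cnic_bnx2x_destroy_ramrod() shows the driver's
 * completion idiom -- arm ctx->waitq/ctx->wait_cond, post the CFC-delete
 * ramrod, then sleep in wait_event().  The completion side lives in event
 * handling code later in the file (not in this excerpt), which is expected
 * to set ctx->wait_cond and wake ctx->waitq when the matching KCQE arrives.
 */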

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1955 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]); 1956 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1957 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]); 1958 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1959 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]); 1960 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1961 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]); 1962 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1963 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]); 1964 1965 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1966 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]); 1967 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1968 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 1969 mac[4]); 1970 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1971 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); 1972 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1973 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 1974 mac[2]); 1975 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1976 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2, 1977 mac[1]); 1978 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1979 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3, 1980 mac[0]); 1981} 1982 1983static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) 1984{ 1985 struct cnic_local *cp = dev->cnic_priv; 1986 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; 1987 u16 tstorm_flags = 0; 1988 1989 if (tcp_ts) { 1990 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 1991 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 1992 } 1993 1994 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1995 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); 1996 1997 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1998 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); 1999} 2000 2001static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 2002 u32 num, int *work) 2003{ 2004 struct cnic_local *cp = dev->cnic_priv; 2005 struct l4_kwq_connect_req1 *kwqe1 = 2006 (struct l4_kwq_connect_req1 *) wqes[0]; 2007 struct l4_kwq_connect_req3 *kwqe3; 2008 struct l5cm_active_conn_buffer *conn_buf; 2009 struct l5cm_conn_addr_params *conn_addr; 2010 union l5cm_specific_data l5_data; 2011 u32 l5_cid = kwqe1->pg_cid; 2012 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 2013 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2014 int ret; 2015 2016 if (num < 2) { 2017 *work = num; 2018 return -EINVAL; 2019 } 2020 2021 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) 2022 *work = 3; 2023 else 2024 *work = 2; 2025 2026 if (num < *work) { 2027 *work = num; 2028 return -EINVAL; 2029 } 2030 2031 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 2032 netdev_err(dev->netdev, "conn_buf size too big\n"); 2033 return -ENOMEM; 2034 } 2035 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2036 if (!conn_buf) 2037 return -ENOMEM; 2038 2039 memset(conn_buf, 0, sizeof(*conn_buf)); 2040 2041 conn_addr = &conn_buf->conn_addr_buf; 2042 conn_addr->remote_addr_0 = csk->ha[0]; 2043 conn_addr->remote_addr_1 = csk->ha[1]; 2044 conn_addr->remote_addr_2 = csk->ha[2]; 2045 conn_addr->remote_addr_3 = csk->ha[3]; 2046 conn_addr->remote_addr_4 = csk->ha[4]; 2047 conn_addr->remote_addr_5 = csk->ha[5]; 2048 2049 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { 2050 struct l4_kwq_connect_req2 *kwqe2 = 2051 (struct l4_kwq_connect_req2 *) wqes[1]; 2052 2053 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; 2054 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; 2055 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; 2056 
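/* Upper three words of the remote IPv6 address; the lowest word of both addresses comes from kwqe1 below, for IPv4 and IPv6 alike. */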
2057 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; 2058 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; 2059 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; 2060 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; 2061 } 2062 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; 2063 2064 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; 2065 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; 2066 conn_addr->local_tcp_port = kwqe1->src_port; 2067 conn_addr->remote_tcp_port = kwqe1->dst_port; 2068 2069 conn_addr->pmtu = kwqe3->pmtu; 2070 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2071 2072 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2073 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); 2074 2075 cnic_bnx2x_set_tcp_timestamp(dev, 2076 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); 2077 2078 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2079 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2080 if (!ret) 2081 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2082 2083 return ret; 2084} 2085 2086static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 2087{ 2088 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 2089 union l5cm_specific_data l5_data; 2090 int ret; 2091 2092 memset(&l5_data, 0, sizeof(l5_data)); 2093 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 2094 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2095 return ret; 2096} 2097 2098static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 2099{ 2100 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 2101 union l5cm_specific_data l5_data; 2102 int ret; 2103 2104 memset(&l5_data, 0, sizeof(l5_data)); 2105 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, 2106 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2107 return ret; 2108} 2109static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2110{ 2111 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 2112 struct l4_kcq kcqe; 2113 struct kcqe *cqes[1]; 2114 2115 memset(&kcqe, 0, sizeof(kcqe)); 2116 kcqe.pg_host_opaque = req->host_opaque; 2117 kcqe.pg_cid = req->host_opaque; 2118 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 2119 cqes[0] = (struct kcqe *) &kcqe; 2120 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2121 return 0; 2122} 2123 2124static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2125{ 2126 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; 2127 struct l4_kcq kcqe; 2128 struct kcqe *cqes[1]; 2129 2130 memset(&kcqe, 0, sizeof(kcqe)); 2131 kcqe.pg_host_opaque = req->pg_host_opaque; 2132 kcqe.pg_cid = req->pg_cid; 2133 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 2134 cqes[0] = (struct kcqe *) &kcqe; 2135 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2136 return 0; 2137} 2138 2139static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) 2140{ 2141 struct fcoe_kwqe_stat *req; 2142 struct fcoe_stat_ramrod_params *fcoe_stat; 2143 union l5cm_specific_data l5_data; 2144 struct cnic_local *cp = dev->cnic_priv; 2145 int ret; 2146 u32 cid; 2147 2148 req = (struct fcoe_kwqe_stat *) kwqe; 2149 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2150 2151 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2152 if (!fcoe_stat) 2153 return -ENOMEM; 2154 2155 memset(fcoe_stat, 0, sizeof(*fcoe_stat)); 2156 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); 2157 2158 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid, 2159 
FCOE_CONNECTION_TYPE, &l5_data); 2160 return ret; 2161} 2162 2163static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], 2164 u32 num, int *work) 2165{ 2166 int ret; 2167 struct cnic_local *cp = dev->cnic_priv; 2168 u32 cid; 2169 struct fcoe_init_ramrod_params *fcoe_init; 2170 struct fcoe_kwqe_init1 *req1; 2171 struct fcoe_kwqe_init2 *req2; 2172 struct fcoe_kwqe_init3 *req3; 2173 union l5cm_specific_data l5_data; 2174 2175 if (num < 3) { 2176 *work = num; 2177 return -EINVAL; 2178 } 2179 req1 = (struct fcoe_kwqe_init1 *) wqes[0]; 2180 req2 = (struct fcoe_kwqe_init2 *) wqes[1]; 2181 req3 = (struct fcoe_kwqe_init3 *) wqes[2]; 2182 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { 2183 *work = 1; 2184 return -EINVAL; 2185 } 2186 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { 2187 *work = 2; 2188 return -EINVAL; 2189 } 2190 2191 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { 2192 netdev_err(dev->netdev, "fcoe_init size too big\n"); 2193 return -ENOMEM; 2194 } 2195 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2196 if (!fcoe_init) 2197 return -ENOMEM; 2198 2199 memset(fcoe_init, 0, sizeof(*fcoe_init)); 2200 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); 2201 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); 2202 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); 2203 fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff; 2204 fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32; 2205 fcoe_init->eq_next_page_addr.lo = 2206 cp->kcq2.dma.pg_map_arr[1] & 0xffffffff; 2207 fcoe_init->eq_next_page_addr.hi = 2208 (u64) cp->kcq2.dma.pg_map_arr[1] >> 32; 2209 2210 fcoe_init->sb_num = cp->status_blk_num; 2211 fcoe_init->eq_prod = MAX_KCQ_IDX; 2212 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; 2213 cp->kcq2.sw_prod_idx = 0; 2214 2215 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2216 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid, 2217 FCOE_CONNECTION_TYPE, &l5_data); 2218 *work = 3; 2219 return ret; 2220} 2221 2222static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], 2223 u32 num, int *work) 2224{ 2225 int ret = 0; 2226 u32 cid = -1, l5_cid; 2227 struct cnic_local *cp = dev->cnic_priv; 2228 struct fcoe_kwqe_conn_offload1 *req1; 2229 struct fcoe_kwqe_conn_offload2 *req2; 2230 struct fcoe_kwqe_conn_offload3 *req3; 2231 struct fcoe_kwqe_conn_offload4 *req4; 2232 struct fcoe_conn_offload_ramrod_params *fcoe_offload; 2233 struct cnic_context *ctx; 2234 struct fcoe_context *fctx; 2235 struct regpair ctx_addr; 2236 union l5cm_specific_data l5_data; 2237 struct fcoe_kcqe kcqe; 2238 struct kcqe *cqes[1]; 2239 2240 if (num < 4) { 2241 *work = num; 2242 return -EINVAL; 2243 } 2244 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; 2245 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; 2246 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; 2247 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3]; 2248 2249 *work = 4; 2250 2251 l5_cid = req1->fcoe_conn_id; 2252 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) 2253 goto err_reply; 2254 2255 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2256 2257 ctx = &cp->ctx_tbl[l5_cid]; 2258 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2259 goto err_reply; 2260 2261 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 2262 if (ret) { 2263 ret = 0; 2264 goto err_reply; 2265 } 2266 cid = ctx->cid; 2267 2268 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); 2269 if (fctx) { 2270 u32 hw_cid = BNX2X_HW_CID(cp, cid); 2271 u32 val; 2272 2273 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, 
CDU_REGION_NUMBER_XCM_AG, 2274 FCOE_CONNECTION_TYPE); 2275 fctx->xstorm_ag_context.cdu_reserved = val; 2276 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 2277 FCOE_CONNECTION_TYPE); 2278 fctx->ustorm_ag_context.cdu_usage = val; 2279 } 2280 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { 2281 netdev_err(dev->netdev, "fcoe_offload size too big\n"); 2282 goto err_reply; 2283 } 2284 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2285 if (!fcoe_offload) 2286 goto err_reply; 2287 2288 memset(fcoe_offload, 0, sizeof(*fcoe_offload)); 2289 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); 2290 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); 2291 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); 2292 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); 2293 2294 cid = BNX2X_HW_CID(cp, cid); 2295 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, 2296 FCOE_CONNECTION_TYPE, &l5_data); 2297 if (!ret) 2298 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2299 2300 return ret; 2301 2302err_reply: 2303 if (cid != -1) 2304 cnic_free_bnx2x_conn_resc(dev, l5_cid); 2305 2306 memset(&kcqe, 0, sizeof(kcqe)); 2307 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; 2308 kcqe.fcoe_conn_id = req1->fcoe_conn_id; 2309 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 2310 2311 cqes[0] = (struct kcqe *) &kcqe; 2312 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2313 return ret; 2314} 2315 2316static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) 2317{ 2318 struct fcoe_kwqe_conn_enable_disable *req; 2319 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable; 2320 union l5cm_specific_data l5_data; 2321 int ret; 2322 u32 cid, l5_cid; 2323 struct cnic_local *cp = dev->cnic_priv; 2324 2325 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2326 cid = req->context_id; 2327 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; 2328 2329 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { 2330 netdev_err(dev->netdev, "fcoe_enable size too big\n"); 2331 return -ENOMEM; 2332 } 2333 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2334 if (!fcoe_enable) 2335 return -ENOMEM; 2336 2337 memset(fcoe_enable, 0, sizeof(*fcoe_enable)); 2338 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); 2339 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, 2340 FCOE_CONNECTION_TYPE, &l5_data); 2341 return ret; 2342} 2343 2344static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) 2345{ 2346 struct fcoe_kwqe_conn_enable_disable *req; 2347 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; 2348 union l5cm_specific_data l5_data; 2349 int ret; 2350 u32 cid, l5_cid; 2351 struct cnic_local *cp = dev->cnic_priv; 2352 2353 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2354 cid = req->context_id; 2355 l5_cid = req->conn_id; 2356 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) 2357 return -EINVAL; 2358 2359 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2360 2361 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { 2362 netdev_err(dev->netdev, "fcoe_disable size too big\n"); 2363 return -ENOMEM; 2364 } 2365 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2366 if (!fcoe_disable) 2367 return -ENOMEM; 2368 2369 memset(fcoe_disable, 0, sizeof(*fcoe_disable)); 2370 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); 2371 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, 2372 FCOE_CONNECTION_TYPE, &l5_data); 2373 return ret; 2374} 2375 
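/* Tear down one FCoE connection: submit a TERMINATE ramrod, wait for its completion if it was accepted, and let delete_task finish the CFC delete; a DESTROY_CONN KCQE is always returned to the ULP. */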
2376static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2377{ 2378 struct fcoe_kwqe_conn_destroy *req; 2379 union l5cm_specific_data l5_data; 2380 int ret; 2381 u32 cid, l5_cid; 2382 struct cnic_local *cp = dev->cnic_priv; 2383 struct cnic_context *ctx; 2384 struct fcoe_kcqe kcqe; 2385 struct kcqe *cqes[1]; 2386 2387 req = (struct fcoe_kwqe_conn_destroy *) kwqe; 2388 cid = req->context_id; 2389 l5_cid = req->conn_id; 2390 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS) 2391 return -EINVAL; 2392 2393 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2394 2395 ctx = &cp->ctx_tbl[l5_cid]; 2396 2397 init_waitqueue_head(&ctx->waitq); 2398 ctx->wait_cond = 0; 2399 2400 memset(&l5_data, 0, sizeof(l5_data)); 2401 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, 2402 FCOE_CONNECTION_TYPE, &l5_data); 2403 if (ret == 0) { 2404 wait_event(ctx->waitq, ctx->wait_cond); 2405 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); 2406 queue_delayed_work(cnic_wq, &cp->delete_task, 2407 msecs_to_jiffies(2000)); 2408 } 2409 2410 memset(&kcqe, 0, sizeof(kcqe)); 2411 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; 2412 kcqe.fcoe_conn_id = req->conn_id; 2413 kcqe.fcoe_conn_context_id = cid; 2414 2415 cqes[0] = (struct kcqe *) &kcqe; 2416 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2417 return ret; 2418} 2419 2420static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2421{ 2422 struct fcoe_kwqe_destroy *req; 2423 union l5cm_specific_data l5_data; 2424 struct cnic_local *cp = dev->cnic_priv; 2425 int ret; 2426 u32 cid; 2427 2428 req = (struct fcoe_kwqe_destroy *) kwqe; 2429 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid); 2430 2431 memset(&l5_data, 0, sizeof(l5_data)); 2432 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid, 2433 FCOE_CONNECTION_TYPE, &l5_data); 2434 return ret; 2435} 2436 2437static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, 2438 struct kwqe *wqes[], u32 num_wqes) 2439{ 2440 int i, work, ret; 2441 u32 opcode; 2442 struct kwqe *kwqe; 2443 2444 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2445 return -EAGAIN; /* bnx2x is down */ 2446 2447 for (i = 0; i < num_wqes; ) { 2448 kwqe = wqes[i]; 2449 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2450 work = 1; 2451 2452 switch (opcode) { 2453 case ISCSI_KWQE_OPCODE_INIT1: 2454 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 2455 break; 2456 case ISCSI_KWQE_OPCODE_INIT2: 2457 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 2458 break; 2459 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2460 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2461 num_wqes - i, &work); 2462 break; 2463 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2464 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2465 break; 2466 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2467 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2468 break; 2469 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2470 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2471 &work); 2472 break; 2473 case L4_KWQE_OPCODE_VALUE_CLOSE: 2474 ret = cnic_bnx2x_close(dev, kwqe); 2475 break; 2476 case L4_KWQE_OPCODE_VALUE_RESET: 2477 ret = cnic_bnx2x_reset(dev, kwqe); 2478 break; 2479 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2480 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2481 break; 2482 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2483 ret = cnic_bnx2x_update_pg(dev, kwqe); 2484 break; 2485 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2486 ret = 0; 2487 break; 2488 default: 2489 ret = 0; 2490 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2491 opcode); 2492 break; 2493 } 2494 if (ret < 0) 2495 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2496 opcode); 2497 i += work; 2498 } 2499 return 0; 2500} 2501 2502static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, 2503 struct kwqe *wqes[], u32 num_wqes) 2504{ 2505 struct cnic_local *cp = dev->cnic_priv; 2506 int i, work, ret; 2507 u32 opcode; 2508 struct kwqe *kwqe; 2509 2510 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2511 return -EAGAIN; /* bnx2x is down */ 2512 2513 if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710) 2514 return -EINVAL; 2515 2516 for (i = 0; i < num_wqes; ) { 2517 kwqe = wqes[i]; 2518 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2519 work = 1; 2520 2521 switch (opcode) { 2522 case FCOE_KWQE_OPCODE_INIT1: 2523 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], 2524 num_wqes - i, &work); 2525 break; 2526 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: 2527 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], 2528 num_wqes - i, &work); 2529 break; 2530 case FCOE_KWQE_OPCODE_ENABLE_CONN: 2531 ret = cnic_bnx2x_fcoe_enable(dev, kwqe); 2532 break; 2533 case FCOE_KWQE_OPCODE_DISABLE_CONN: 2534 ret = cnic_bnx2x_fcoe_disable(dev, kwqe); 2535 break; 2536 case FCOE_KWQE_OPCODE_DESTROY_CONN: 2537 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); 2538 break; 2539 case FCOE_KWQE_OPCODE_DESTROY: 2540 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); 2541 break; 2542 case FCOE_KWQE_OPCODE_STAT: 2543 ret = cnic_bnx2x_fcoe_stat(dev, kwqe); 2544 break; 2545 default: 2546 ret = 0; 2547 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2548 opcode); 2549 break; 2550 } 2551 if (ret < 0) 2552 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2553 opcode); 2554 i += work; 2555 } 2556 return 0; 2557} 2558 2559static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 2560 u32 num_wqes) 2561{ 2562 int ret = -EINVAL; 2563 u32 layer_code; 2564 2565 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2566 return -EAGAIN; /* bnx2x is down */ 2567 2568 if (!num_wqes) 2569 return 0; 2570 2571 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; 2572 switch (layer_code) { 2573 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: 2574 case KWQE_FLAGS_LAYER_MASK_L4: 2575 case KWQE_FLAGS_LAYER_MASK_L2: 2576 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); 2577 break; 2578 2579 case KWQE_FLAGS_LAYER_MASK_L5_FCOE: 2580 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); 2581 break; 2582 } 2583 return ret; 2584} 2585 2586static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) 2587{ 2588 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) 2589 return KCQE_FLAGS_LAYER_MASK_L4; 2590 2591 return opflag & KCQE_FLAGS_LAYER_MASK; 2592} 2593 2594static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2595{ 2596 struct cnic_local *cp = dev->cnic_priv; 2597 int i, j, comp = 0; 2598 2599 i = 0; 2600 j = 1; 2601 while (num_cqes) { 2602 struct cnic_ulp_ops *ulp_ops; 2603 int ulp_type; 2604 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2605 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); 2606 2607 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2608 comp++; 2609 2610 while (j < num_cqes) { 2611 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2612 2613 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) 2614 break; 2615 2616 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2617 comp++; 2618 j++; 2619 } 2620 2621 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2622 ulp_type = CNIC_ULP_RDMA; 2623 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2624 ulp_type = CNIC_ULP_ISCSI; 2625 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) 2626 ulp_type = CNIC_ULP_FCOE; 2627 else 
if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2628 ulp_type = CNIC_ULP_L4; 2629 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2630 goto end; 2631 else { 2632 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2633 kcqe_op_flag); 2634 goto end; 2635 } 2636 2637 rcu_read_lock(); 2638 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2639 if (likely(ulp_ops)) { 2640 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2641 cp->completed_kcq + i, j); 2642 } 2643 rcu_read_unlock(); 2644end: 2645 num_cqes -= j; 2646 i += j; 2647 j = 1; 2648 } 2649 if (unlikely(comp)) 2650 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); 2651} 2652 2653static u16 cnic_bnx2_next_idx(u16 idx) 2654{ 2655 return idx + 1; 2656} 2657 2658static u16 cnic_bnx2_hw_idx(u16 idx) 2659{ 2660 return idx; 2661} 2662 2663static u16 cnic_bnx2x_next_idx(u16 idx) 2664{ 2665 idx++; 2666 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2667 idx++; 2668 2669 return idx; 2670} 2671 2672static u16 cnic_bnx2x_hw_idx(u16 idx) 2673{ 2674 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2675 idx++; 2676 return idx; 2677} 2678 2679static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) 2680{ 2681 struct cnic_local *cp = dev->cnic_priv; 2682 u16 i, ri, hw_prod, last; 2683 struct kcqe *kcqe; 2684 int kcqe_cnt = 0, last_cnt = 0; 2685 2686 i = ri = last = info->sw_prod_idx; 2687 ri &= MAX_KCQ_IDX; 2688 hw_prod = *info->hw_prod_idx_ptr; 2689 hw_prod = cp->hw_idx(hw_prod); 2690 2691 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2692 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2693 cp->completed_kcq[kcqe_cnt++] = kcqe; 2694 i = cp->next_idx(i); 2695 ri = i & MAX_KCQ_IDX; 2696 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2697 last_cnt = kcqe_cnt; 2698 last = i; 2699 } 2700 } 2701 2702 info->sw_prod_idx = last; 2703 return last_cnt; 2704} 2705 2706static int cnic_l2_completion(struct cnic_local *cp) 2707{ 2708 u16 hw_cons, sw_cons; 2709 struct cnic_uio_dev *udev = cp->udev; 2710 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2711 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 2712 u32 cmd; 2713 int comp = 0; 2714 2715 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2716 return 0; 2717 2718 hw_cons = *cp->rx_cons_ptr; 2719 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2720 hw_cons++; 2721 2722 sw_cons = cp->rx_cons; 2723 while (sw_cons != hw_cons) { 2724 u8 cqe_fp_flags; 2725 2726 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2727 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2728 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2729 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2730 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2731 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2732 cmd == RAMROD_CMD_ID_ETH_HALT) 2733 comp++; 2734 } 2735 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2736 } 2737 return comp; 2738} 2739 2740static void cnic_chk_pkt_rings(struct cnic_local *cp) 2741{ 2742 u16 rx_cons, tx_cons; 2743 int comp = 0; 2744 2745 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 2746 return; 2747 2748 rx_cons = *cp->rx_cons_ptr; 2749 tx_cons = *cp->tx_cons_ptr; 2750 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2751 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2752 comp = cnic_l2_completion(cp); 2753 2754 cp->tx_cons = tx_cons; 2755 cp->rx_cons = rx_cons; 2756 2757 if (cp->udev) 2758 uio_event_notify(&cp->udev->cnic_uinfo); 2759 } 2760 if (comp) 2761 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2762} 2763 2764static u32 
cnic_service_bnx2_queues(struct cnic_dev *dev) 2765{ 2766 struct cnic_local *cp = dev->cnic_priv; 2767 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2768 int kcqe_cnt; 2769 2770 /* status block index must be read before reading other fields */ 2771 rmb(); 2772 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2773 2774 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2775 2776 service_kcqes(dev, kcqe_cnt); 2777 2778 /* Tell compiler that status_blk fields can change. */ 2779 barrier(); 2780 if (status_idx != *cp->kcq1.status_idx_ptr) { 2781 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2782 /* status block index must be read first */ 2783 rmb(); 2784 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2785 } else 2786 break; 2787 } 2788 2789 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); 2790 2791 cnic_chk_pkt_rings(cp); 2792 2793 return status_idx; 2794} 2795 2796static int cnic_service_bnx2(void *data, void *status_blk) 2797{ 2798 struct cnic_dev *dev = data; 2799 2800 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2801 struct status_block *sblk = status_blk; 2802 2803 return sblk->status_idx; 2804 } 2805 2806 return cnic_service_bnx2_queues(dev); 2807} 2808 2809static void cnic_service_bnx2_msix(unsigned long data) 2810{ 2811 struct cnic_dev *dev = (struct cnic_dev *) data; 2812 struct cnic_local *cp = dev->cnic_priv; 2813 2814 cp->last_status_idx = cnic_service_bnx2_queues(dev); 2815 2816 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2817 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2818} 2819 2820static void cnic_doirq(struct cnic_dev *dev) 2821{ 2822 struct cnic_local *cp = dev->cnic_priv; 2823 2824 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2825 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; 2826 2827 prefetch(cp->status_blk.gen); 2828 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2829 2830 tasklet_schedule(&cp->cnic_irq_task); 2831 } 2832} 2833 2834static irqreturn_t cnic_irq(int irq, void *dev_instance) 2835{ 2836 struct cnic_dev *dev = dev_instance; 2837 struct cnic_local *cp = dev->cnic_priv; 2838 2839 if (cp->ack_int) 2840 cp->ack_int(dev); 2841 2842 cnic_doirq(dev); 2843 2844 return IRQ_HANDLED; 2845} 2846 2847static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 2848 u16 index, u8 op, u8 update) 2849{ 2850 struct cnic_local *cp = dev->cnic_priv; 2851 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 2852 COMMAND_REG_INT_ACK); 2853 struct igu_ack_register igu_ack; 2854 2855 igu_ack.status_block_index = index; 2856 igu_ack.sb_id_and_flags = 2857 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 2858 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 2859 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 2860 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 2861 2862 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 2863} 2864 2865static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, 2866 u16 index, u8 op, u8 update) 2867{ 2868 struct igu_regular cmd_data; 2869 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 2870 2871 cmd_data.sb_id_and_flags = 2872 (index << IGU_REGULAR_SB_INDEX_SHIFT) | 2873 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 2874 (update << IGU_REGULAR_BUPDATE_SHIFT) | 2875 (op << IGU_REGULAR_ENABLE_INT_SHIFT); 2876 2877 2878 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); 2879} 2880 2881static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 2882{ 2883 struct cnic_local *cp = dev->cnic_priv; 2884 2885 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, 
2886 IGU_INT_DISABLE, 0); 2887} 2888 2889static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) 2890{ 2891 struct cnic_local *cp = dev->cnic_priv; 2892 2893 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, 2894 IGU_INT_DISABLE, 0); 2895} 2896 2897static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 2898{ 2899 u32 last_status = *info->status_idx_ptr; 2900 int kcqe_cnt; 2901 2902 /* status block index must be read before reading the KCQ */ 2903 rmb(); 2904 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2905 2906 service_kcqes(dev, kcqe_cnt); 2907 2908 /* Tell compiler that sblk fields can change. */ 2909 barrier(); 2910 if (last_status == *info->status_idx_ptr) 2911 break; 2912 2913 last_status = *info->status_idx_ptr; 2914 /* status block index must be read before reading the KCQ */ 2915 rmb(); 2916 } 2917 return last_status; 2918} 2919 2920static void cnic_service_bnx2x_bh(unsigned long data) 2921{ 2922 struct cnic_dev *dev = (struct cnic_dev *) data; 2923 struct cnic_local *cp = dev->cnic_priv; 2924 u32 status_idx, new_status_idx; 2925 2926 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2927 return; 2928 2929 while (1) { 2930 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2931 2932 CNIC_WR16(dev, cp->kcq1.io_addr, 2933 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2934 2935 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { 2936 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2937 status_idx, IGU_INT_ENABLE, 1); 2938 break; 2939 } 2940 2941 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2942 2943 if (new_status_idx != status_idx) 2944 continue; 2945 2946 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2947 MAX_KCQ_IDX); 2948 2949 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2950 status_idx, IGU_INT_ENABLE, 1); 2951 2952 break; 2953 } 2954} 2955 2956static int cnic_service_bnx2x(void *data, void *status_blk) 2957{ 2958 struct cnic_dev *dev = data; 2959 struct cnic_local *cp = dev->cnic_priv; 2960 2961 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 2962 cnic_doirq(dev); 2963 2964 cnic_chk_pkt_rings(cp); 2965 2966 return 0; 2967} 2968 2969static void cnic_ulp_stop(struct cnic_dev *dev) 2970{ 2971 struct cnic_local *cp = dev->cnic_priv; 2972 int if_type; 2973 2974 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 2975 2976 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2977 struct cnic_ulp_ops *ulp_ops; 2978 2979 mutex_lock(&cnic_lock); 2980 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 2981 lockdep_is_held(&cnic_lock)); 2982 if (!ulp_ops) { 2983 mutex_unlock(&cnic_lock); 2984 continue; 2985 } 2986 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2987 mutex_unlock(&cnic_lock); 2988 2989 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2990 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 2991 2992 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2993 } 2994} 2995 2996static void cnic_ulp_start(struct cnic_dev *dev) 2997{ 2998 struct cnic_local *cp = dev->cnic_priv; 2999 int if_type; 3000 3001 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 3002 struct cnic_ulp_ops *ulp_ops; 3003 3004 mutex_lock(&cnic_lock); 3005 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3006 lockdep_is_held(&cnic_lock)); 3007 if (!ulp_ops || !ulp_ops->cnic_start) { 3008 mutex_unlock(&cnic_lock); 3009 continue; 3010 } 3011 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3012 mutex_unlock(&cnic_lock); 3013 3014 if (!test_and_set_bit(ULP_F_START, 
&cp->ulp_flags[if_type])) 3015 ulp_ops->cnic_start(cp->ulp_handle[if_type]); 3016 3017 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3018 } 3019} 3020 3021static int cnic_ctl(void *data, struct cnic_ctl_info *info) 3022{ 3023 struct cnic_dev *dev = data; 3024 3025 switch (info->cmd) { 3026 case CNIC_CTL_STOP_CMD: 3027 cnic_hold(dev); 3028 3029 cnic_ulp_stop(dev); 3030 cnic_stop_hw(dev); 3031 3032 cnic_put(dev); 3033 break; 3034 case CNIC_CTL_START_CMD: 3035 cnic_hold(dev); 3036 3037 if (!cnic_start_hw(dev)) 3038 cnic_ulp_start(dev); 3039 3040 cnic_put(dev); 3041 break; 3042 case CNIC_CTL_COMPLETION_CMD: { 3043 u32 cid = BNX2X_SW_CID(info->data.comp.cid); 3044 u32 l5_cid; 3045 struct cnic_local *cp = dev->cnic_priv; 3046 3047 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 3048 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3049 3050 ctx->wait_cond = 1; 3051 wake_up(&ctx->waitq); 3052 } 3053 break; 3054 } 3055 default: 3056 return -EINVAL; 3057 } 3058 return 0; 3059} 3060 3061static void cnic_ulp_init(struct cnic_dev *dev) 3062{ 3063 int i; 3064 struct cnic_local *cp = dev->cnic_priv; 3065 3066 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3067 struct cnic_ulp_ops *ulp_ops; 3068 3069 mutex_lock(&cnic_lock); 3070 ulp_ops = cnic_ulp_tbl_prot(i); 3071 if (!ulp_ops || !ulp_ops->cnic_init) { 3072 mutex_unlock(&cnic_lock); 3073 continue; 3074 } 3075 ulp_get(ulp_ops); 3076 mutex_unlock(&cnic_lock); 3077 3078 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3079 ulp_ops->cnic_init(dev); 3080 3081 ulp_put(ulp_ops); 3082 } 3083} 3084 3085static void cnic_ulp_exit(struct cnic_dev *dev) 3086{ 3087 int i; 3088 struct cnic_local *cp = dev->cnic_priv; 3089 3090 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3091 struct cnic_ulp_ops *ulp_ops; 3092 3093 mutex_lock(&cnic_lock); 3094 ulp_ops = cnic_ulp_tbl_prot(i); 3095 if (!ulp_ops || !ulp_ops->cnic_exit) { 3096 mutex_unlock(&cnic_lock); 3097 continue; 3098 } 3099 ulp_get(ulp_ops); 3100 mutex_unlock(&cnic_lock); 3101 3102 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3103 ulp_ops->cnic_exit(dev); 3104 3105 ulp_put(ulp_ops); 3106 } 3107} 3108 3109static int cnic_cm_offload_pg(struct cnic_sock *csk) 3110{ 3111 struct cnic_dev *dev = csk->dev; 3112 struct l4_kwq_offload_pg *l4kwqe; 3113 struct kwqe *wqes[1]; 3114 3115 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; 3116 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3117 wqes[0] = (struct kwqe *) l4kwqe; 3118 3119 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 3120 l4kwqe->flags = 3121 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 3122 l4kwqe->l2hdr_nbytes = ETH_HLEN; 3123 3124 l4kwqe->da0 = csk->ha[0]; 3125 l4kwqe->da1 = csk->ha[1]; 3126 l4kwqe->da2 = csk->ha[2]; 3127 l4kwqe->da3 = csk->ha[3]; 3128 l4kwqe->da4 = csk->ha[4]; 3129 l4kwqe->da5 = csk->ha[5]; 3130 3131 l4kwqe->sa0 = dev->mac_addr[0]; 3132 l4kwqe->sa1 = dev->mac_addr[1]; 3133 l4kwqe->sa2 = dev->mac_addr[2]; 3134 l4kwqe->sa3 = dev->mac_addr[3]; 3135 l4kwqe->sa4 = dev->mac_addr[4]; 3136 l4kwqe->sa5 = dev->mac_addr[5]; 3137 3138 l4kwqe->etype = ETH_P_IP; 3139 l4kwqe->ipid_start = DEF_IPID_START; 3140 l4kwqe->host_opaque = csk->l5_cid; 3141 3142 if (csk->vlan_id) { 3143 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 3144 l4kwqe->vlan_tag = csk->vlan_id; 3145 l4kwqe->l2hdr_nbytes += 4; 3146 } 3147 3148 return dev->submit_kwqes(dev, wqes, 1); 3149} 3150 3151static int cnic_cm_update_pg(struct cnic_sock *csk) 3152{ 3153 struct cnic_dev *dev = csk->dev; 3154 struct l4_kwq_update_pg *l4kwqe; 3155 struct kwqe *wqes[1]; 3156 3157 
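/* Build the update-PG KWQE in the per-socket buffer; only the destination MAC is marked valid below. */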
l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 3158 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3159 wqes[0] = (struct kwqe *) l4kwqe; 3160 3161 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 3162 l4kwqe->flags = 3163 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 3164 l4kwqe->pg_cid = csk->pg_cid; 3165 3166 l4kwqe->da0 = csk->ha[0]; 3167 l4kwqe->da1 = csk->ha[1]; 3168 l4kwqe->da2 = csk->ha[2]; 3169 l4kwqe->da3 = csk->ha[3]; 3170 l4kwqe->da4 = csk->ha[4]; 3171 l4kwqe->da5 = csk->ha[5]; 3172 3173 l4kwqe->pg_host_opaque = csk->l5_cid; 3174 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 3175 3176 return dev->submit_kwqes(dev, wqes, 1); 3177} 3178 3179static int cnic_cm_upload_pg(struct cnic_sock *csk) 3180{ 3181 struct cnic_dev *dev = csk->dev; 3182 struct l4_kwq_upload *l4kwqe; 3183 struct kwqe *wqes[1]; 3184 3185 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 3186 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3187 wqes[0] = (struct kwqe *) l4kwqe; 3188 3189 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 3190 l4kwqe->flags = 3191 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 3192 l4kwqe->cid = csk->pg_cid; 3193 3194 return dev->submit_kwqes(dev, wqes, 1); 3195} 3196 3197static int cnic_cm_conn_req(struct cnic_sock *csk) 3198{ 3199 struct cnic_dev *dev = csk->dev; 3200 struct l4_kwq_connect_req1 *l4kwqe1; 3201 struct l4_kwq_connect_req2 *l4kwqe2; 3202 struct l4_kwq_connect_req3 *l4kwqe3; 3203 struct kwqe *wqes[3]; 3204 u8 tcp_flags = 0; 3205 int num_wqes = 2; 3206 3207 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 3208 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 3209 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 3210 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 3211 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 3212 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 3213 3214 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 3215 l4kwqe3->flags = 3216 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 3217 l4kwqe3->ka_timeout = csk->ka_timeout; 3218 l4kwqe3->ka_interval = csk->ka_interval; 3219 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 3220 l4kwqe3->tos = csk->tos; 3221 l4kwqe3->ttl = csk->ttl; 3222 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 3223 l4kwqe3->pmtu = csk->mtu; 3224 l4kwqe3->rcv_buf = csk->rcv_buf; 3225 l4kwqe3->snd_buf = csk->snd_buf; 3226 l4kwqe3->seed = csk->seed; 3227 3228 wqes[0] = (struct kwqe *) l4kwqe1; 3229 if (test_bit(SK_F_IPV6, &csk->flags)) { 3230 wqes[1] = (struct kwqe *) l4kwqe2; 3231 wqes[2] = (struct kwqe *) l4kwqe3; 3232 num_wqes = 3; 3233 3234 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 3235 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 3236 l4kwqe2->flags = 3237 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 3238 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 3239 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 3240 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 3241 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 3242 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 3243 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 3244 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 3245 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 3246 sizeof(struct tcphdr); 3247 } else { 3248 wqes[1] = (struct kwqe *) l4kwqe3; 3249 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 3250 sizeof(struct tcphdr); 3251 } 3252 3253 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 3254 l4kwqe1->flags = 3255 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 3256 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 3257 l4kwqe1->cid = 
csk->cid; 3258 l4kwqe1->pg_cid = csk->pg_cid; 3259 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 3260 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 3261 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 3262 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 3263 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 3264 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 3265 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 3266 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 3267 if (csk->tcp_flags & SK_TCP_NAGLE) 3268 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 3269 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 3270 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 3271 if (csk->tcp_flags & SK_TCP_SACK) 3272 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 3273 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 3274 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 3275 3276 l4kwqe1->tcp_flags = tcp_flags; 3277 3278 return dev->submit_kwqes(dev, wqes, num_wqes); 3279} 3280 3281static int cnic_cm_close_req(struct cnic_sock *csk) 3282{ 3283 struct cnic_dev *dev = csk->dev; 3284 struct l4_kwq_close_req *l4kwqe; 3285 struct kwqe *wqes[1]; 3286 3287 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 3288 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3289 wqes[0] = (struct kwqe *) l4kwqe; 3290 3291 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 3292 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 3293 l4kwqe->cid = csk->cid; 3294 3295 return dev->submit_kwqes(dev, wqes, 1); 3296} 3297 3298static int cnic_cm_abort_req(struct cnic_sock *csk) 3299{ 3300 struct cnic_dev *dev = csk->dev; 3301 struct l4_kwq_reset_req *l4kwqe; 3302 struct kwqe *wqes[1]; 3303 3304 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 3305 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3306 wqes[0] = (struct kwqe *) l4kwqe; 3307 3308 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 3309 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 3310 l4kwqe->cid = csk->cid; 3311 3312 return dev->submit_kwqes(dev, wqes, 1); 3313} 3314 3315static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 3316 u32 l5_cid, struct cnic_sock **csk, void *context) 3317{ 3318 struct cnic_local *cp = dev->cnic_priv; 3319 struct cnic_sock *csk1; 3320 3321 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3322 return -EINVAL; 3323 3324 if (cp->ctx_tbl) { 3325 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3326 3327 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 3328 return -EAGAIN; 3329 } 3330 3331 csk1 = &cp->csk_tbl[l5_cid]; 3332 if (atomic_read(&csk1->ref_count)) 3333 return -EAGAIN; 3334 3335 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 3336 return -EBUSY; 3337 3338 csk1->dev = dev; 3339 csk1->cid = cid; 3340 csk1->l5_cid = l5_cid; 3341 csk1->ulp_type = ulp_type; 3342 csk1->context = context; 3343 3344 csk1->ka_timeout = DEF_KA_TIMEOUT; 3345 csk1->ka_interval = DEF_KA_INTERVAL; 3346 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 3347 csk1->tos = DEF_TOS; 3348 csk1->ttl = DEF_TTL; 3349 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 3350 csk1->rcv_buf = DEF_RCV_BUF; 3351 csk1->snd_buf = DEF_SND_BUF; 3352 csk1->seed = DEF_SEED; 3353 3354 *csk = csk1; 3355 return 0; 3356} 3357 3358static void cnic_cm_cleanup(struct cnic_sock *csk) 3359{ 3360 if (csk->src_port) { 3361 struct cnic_dev *dev = csk->dev; 3362 struct cnic_local *cp = dev->cnic_priv; 3363 3364 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); 3365 csk->src_port = 0; 3366 } 3367} 3368 3369static void cnic_close_conn(struct cnic_sock *csk) 3370{ 3371 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 3372 cnic_cm_upload_pg(csk); 
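/* PG upload has been requested; the flag is cleared below so the entry is not uploaded twice. */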
3373 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3374 } 3375 cnic_cm_cleanup(csk); 3376} 3377 3378static int cnic_cm_destroy(struct cnic_sock *csk) 3379{ 3380 if (!cnic_in_use(csk)) 3381 return -EINVAL; 3382 3383 csk_hold(csk); 3384 clear_bit(SK_F_INUSE, &csk->flags); 3385 smp_mb__after_clear_bit(); 3386 while (atomic_read(&csk->ref_count) != 1) 3387 msleep(1); 3388 cnic_cm_cleanup(csk); 3389 3390 csk->flags = 0; 3391 csk_put(csk); 3392 return 0; 3393} 3394 3395static inline u16 cnic_get_vlan(struct net_device *dev, 3396 struct net_device **vlan_dev) 3397{ 3398 if (dev->priv_flags & IFF_802_1Q_VLAN) { 3399 *vlan_dev = vlan_dev_real_dev(dev); 3400 return vlan_dev_vlan_id(dev); 3401 } 3402 *vlan_dev = dev; 3403 return 0; 3404} 3405 3406static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 3407 struct dst_entry **dst) 3408{ 3409#if defined(CONFIG_INET) 3410 struct rtable *rt; 3411 3412 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); 3413 if (!IS_ERR(rt)) { 3414 *dst = &rt->dst; 3415 return 0; 3416 } 3417 return PTR_ERR(rt); 3418#else 3419 return -ENETUNREACH; 3420#endif 3421} 3422 3423static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 3424 struct dst_entry **dst) 3425{ 3426#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 3427 struct flowi6 fl6; 3428 3429 memset(&fl6, 0, sizeof(fl6)); 3430 ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr); 3431 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 3432 fl6.flowi6_oif = dst_addr->sin6_scope_id; 3433 3434 *dst = ip6_route_output(&init_net, NULL, &fl6); 3435 if (*dst) 3436 return 0; 3437#endif 3438 3439 return -ENETUNREACH; 3440} 3441 3442static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 3443 int ulp_type) 3444{ 3445 struct cnic_dev *dev = NULL; 3446 struct dst_entry *dst; 3447 struct net_device *netdev = NULL; 3448 int err = -ENETUNREACH; 3449 3450 if (dst_addr->sin_family == AF_INET) 3451 err = cnic_get_v4_route(dst_addr, &dst); 3452 else if (dst_addr->sin_family == AF_INET6) { 3453 struct sockaddr_in6 *dst_addr6 = 3454 (struct sockaddr_in6 *) dst_addr; 3455 3456 err = cnic_get_v6_route(dst_addr6, &dst); 3457 } else 3458 return NULL; 3459 3460 if (err) 3461 return NULL; 3462 3463 if (!dst->dev) 3464 goto done; 3465 3466 cnic_get_vlan(dst->dev, &netdev); 3467 3468 dev = cnic_from_netdev(netdev); 3469 3470done: 3471 dst_release(dst); 3472 if (dev) 3473 cnic_put(dev); 3474 return dev; 3475} 3476 3477static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3478{ 3479 struct cnic_dev *dev = csk->dev; 3480 struct cnic_local *cp = dev->cnic_priv; 3481 3482 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 3483} 3484 3485static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3486{ 3487 struct cnic_dev *dev = csk->dev; 3488 struct cnic_local *cp = dev->cnic_priv; 3489 int is_v6, rc = 0; 3490 struct dst_entry *dst = NULL; 3491 struct net_device *realdev; 3492 __be16 local_port; 3493 u32 port_id; 3494 3495 if (saddr->local.v6.sin6_family == AF_INET6 && 3496 saddr->remote.v6.sin6_family == AF_INET6) 3497 is_v6 = 1; 3498 else if (saddr->local.v4.sin_family == AF_INET && 3499 saddr->remote.v4.sin_family == AF_INET) 3500 is_v6 = 0; 3501 else 3502 return -EINVAL; 3503 3504 clear_bit(SK_F_IPV6, &csk->flags); 3505 3506 if (is_v6) { 3507 set_bit(SK_F_IPV6, &csk->flags); 3508 cnic_get_v6_route(&saddr->remote.v6, &dst); 3509 3510 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 3511 sizeof(struct in6_addr)); 3512 
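/* The remote port comes from the caller; the local port is validated against the CNIC range and allocated from csk_port_tbl below. */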
csk->dst_port = saddr->remote.v6.sin6_port; 3513 local_port = saddr->local.v6.sin6_port; 3514 3515 } else { 3516 cnic_get_v4_route(&saddr->remote.v4, &dst); 3517 3518 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 3519 csk->dst_port = saddr->remote.v4.sin_port; 3520 local_port = saddr->local.v4.sin_port; 3521 } 3522 3523 csk->vlan_id = 0; 3524 csk->mtu = dev->netdev->mtu; 3525 if (dst && dst->dev) { 3526 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 3527 if (realdev == dev->netdev) { 3528 csk->vlan_id = vlan; 3529 csk->mtu = dst_mtu(dst); 3530 } 3531 } 3532 3533 port_id = be16_to_cpu(local_port); 3534 if (port_id >= CNIC_LOCAL_PORT_MIN && 3535 port_id < CNIC_LOCAL_PORT_MAX) { 3536 if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) 3537 port_id = 0; 3538 } else 3539 port_id = 0; 3540 3541 if (!port_id) { 3542 port_id = cnic_alloc_new_id(&cp->csk_port_tbl); 3543 if (port_id == -1) { 3544 rc = -ENOMEM; 3545 goto err_out; 3546 } 3547 local_port = cpu_to_be16(port_id); 3548 } 3549 csk->src_port = local_port; 3550 3551err_out: 3552 dst_release(dst); 3553 return rc; 3554} 3555 3556static void cnic_init_csk_state(struct cnic_sock *csk) 3557{ 3558 csk->state = 0; 3559 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3560 clear_bit(SK_F_CLOSING, &csk->flags); 3561} 3562 3563static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3564{ 3565 int err = 0; 3566 3567 if (!cnic_in_use(csk)) 3568 return -EINVAL; 3569 3570 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 3571 return -EINVAL; 3572 3573 cnic_init_csk_state(csk); 3574 3575 err = cnic_get_route(csk, saddr); 3576 if (err) 3577 goto err_out; 3578 3579 err = cnic_resolve_addr(csk, saddr); 3580 if (!err) 3581 return 0; 3582 3583err_out: 3584 clear_bit(SK_F_CONNECT_START, &csk->flags); 3585 return err; 3586} 3587 3588static int cnic_cm_abort(struct cnic_sock *csk) 3589{ 3590 struct cnic_local *cp = csk->dev->cnic_priv; 3591 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; 3592 3593 if (!cnic_in_use(csk)) 3594 return -EINVAL; 3595 3596 if (cnic_abort_prep(csk)) 3597 return cnic_cm_abort_req(csk); 3598 3599 /* Getting here means that we haven't started connect, or 3600 * connect was not successful. 
3601 */ 3602 3603 cp->close_conn(csk, opcode); 3604 if (csk->state != opcode) 3605 return -EALREADY; 3606 3607 return 0; 3608} 3609 3610static int cnic_cm_close(struct cnic_sock *csk) 3611{ 3612 if (!cnic_in_use(csk)) 3613 return -EINVAL; 3614 3615 if (cnic_close_prep(csk)) { 3616 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3617 return cnic_cm_close_req(csk); 3618 } else { 3619 return -EALREADY; 3620 } 3621 return 0; 3622} 3623 3624static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3625 u8 opcode) 3626{ 3627 struct cnic_ulp_ops *ulp_ops; 3628 int ulp_type = csk->ulp_type; 3629 3630 rcu_read_lock(); 3631 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3632 if (ulp_ops) { 3633 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3634 ulp_ops->cm_connect_complete(csk); 3635 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3636 ulp_ops->cm_close_complete(csk); 3637 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3638 ulp_ops->cm_remote_abort(csk); 3639 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3640 ulp_ops->cm_abort_complete(csk); 3641 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3642 ulp_ops->cm_remote_close(csk); 3643 } 3644 rcu_read_unlock(); 3645} 3646 3647static int cnic_cm_set_pg(struct cnic_sock *csk) 3648{ 3649 if (cnic_offld_prep(csk)) { 3650 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3651 cnic_cm_update_pg(csk); 3652 else 3653 cnic_cm_offload_pg(csk); 3654 } 3655 return 0; 3656} 3657 3658static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3659{ 3660 struct cnic_local *cp = dev->cnic_priv; 3661 u32 l5_cid = kcqe->pg_host_opaque; 3662 u8 opcode = kcqe->op_code; 3663 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3664 3665 csk_hold(csk); 3666 if (!cnic_in_use(csk)) 3667 goto done; 3668 3669 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3670 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3671 goto done; 3672 } 3673 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3674 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3675 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3676 cnic_cm_upcall(cp, csk, 3677 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3678 goto done; 3679 } 3680 3681 csk->pg_cid = kcqe->pg_cid; 3682 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3683 cnic_cm_conn_req(csk); 3684 3685done: 3686 csk_put(csk); 3687} 3688 3689static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) 3690{ 3691 struct cnic_local *cp = dev->cnic_priv; 3692 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe; 3693 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; 3694 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3695 3696 ctx->timestamp = jiffies; 3697 ctx->wait_cond = 1; 3698 wake_up(&ctx->waitq); 3699} 3700 3701static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3702{ 3703 struct cnic_local *cp = dev->cnic_priv; 3704 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3705 u8 opcode = l4kcqe->op_code; 3706 u32 l5_cid; 3707 struct cnic_sock *csk; 3708 3709 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) { 3710 cnic_process_fcoe_term_conn(dev, kcqe); 3711 return; 3712 } 3713 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3714 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3715 cnic_cm_process_offld_pg(dev, l4kcqe); 3716 return; 3717 } 3718 3719 l5_cid = l4kcqe->conn_id; 3720 if (opcode & 0x80) 3721 l5_cid = l4kcqe->cid; 3722 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3723 return; 3724 3725 csk = &cp->csk_tbl[l5_cid]; 3726 csk_hold(csk); 3727 
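/* Reference taken above; every path below must drop it with csk_put() before returning. */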
3728 if (!cnic_in_use(csk)) { 3729 csk_put(csk); 3730 return; 3731 } 3732 3733 switch (opcode) { 3734 case L5CM_RAMROD_CMD_ID_TCP_CONNECT: 3735 if (l4kcqe->status != 0) { 3736 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3737 cnic_cm_upcall(cp, csk, 3738 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3739 } 3740 break; 3741 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 3742 if (l4kcqe->status == 0) 3743 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 3744 3745 smp_mb__before_clear_bit(); 3746 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3747 cnic_cm_upcall(cp, csk, opcode); 3748 break; 3749 3750 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3751 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3752 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3753 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3754 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3755 cp->close_conn(csk, opcode); 3756 break; 3757 3758 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: 3759 cnic_cm_upcall(cp, csk, opcode); 3760 break; 3761 } 3762 csk_put(csk); 3763} 3764 3765static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) 3766{ 3767 struct cnic_dev *dev = data; 3768 int i; 3769 3770 for (i = 0; i < num; i++) 3771 cnic_cm_process_kcqe(dev, kcqe[i]); 3772} 3773 3774static struct cnic_ulp_ops cm_ulp_ops = { 3775 .indicate_kcqes = cnic_cm_indicate_kcqe, 3776}; 3777 3778static void cnic_cm_free_mem(struct cnic_dev *dev) 3779{ 3780 struct cnic_local *cp = dev->cnic_priv; 3781 3782 kfree(cp->csk_tbl); 3783 cp->csk_tbl = NULL; 3784 cnic_free_id_tbl(&cp->csk_port_tbl); 3785} 3786 3787static int cnic_cm_alloc_mem(struct cnic_dev *dev) 3788{ 3789 struct cnic_local *cp = dev->cnic_priv; 3790 3791 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, 3792 GFP_KERNEL); 3793 if (!cp->csk_tbl) 3794 return -ENOMEM; 3795 3796 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 3797 CNIC_LOCAL_PORT_MIN)) { 3798 cnic_cm_free_mem(dev); 3799 return -ENOMEM; 3800 } 3801 return 0; 3802} 3803 3804static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 3805{ 3806 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 3807 /* Unsolicited RESET_COMP or RESET_RECEIVED */ 3808 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 3809 csk->state = opcode; 3810 } 3811 3812 /* 1. If event opcode matches the expected event in csk->state 3813 * 2. If the expected event is CLOSE_COMP, we accept any event 3814 * 3. If the expected event is 0, meaning the connection was never 3815 * established, we accept the opcode from cm_abort. 
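 * (cnic_cm_abort passes L4_KCQE_OPCODE_VALUE_RESET_COMP in that case.)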
3816 */ 3817 if (opcode == csk->state || csk->state == 0 || 3818 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) { 3819 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 3820 if (csk->state == 0) 3821 csk->state = opcode; 3822 return 1; 3823 } 3824 } 3825 return 0; 3826} 3827 3828static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) 3829{ 3830 struct cnic_dev *dev = csk->dev; 3831 struct cnic_local *cp = dev->cnic_priv; 3832 3833 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { 3834 cnic_cm_upcall(cp, csk, opcode); 3835 return; 3836 } 3837 3838 clear_bit(SK_F_CONNECT_START, &csk->flags); 3839 cnic_close_conn(csk); 3840 csk->state = opcode; 3841 cnic_cm_upcall(cp, csk, opcode); 3842} 3843 3844static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) 3845{ 3846} 3847 3848static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) 3849{ 3850 u32 seed; 3851 3852 get_random_bytes(&seed, 4); 3853 cnic_ctx_wr(dev, 45, 0, seed); 3854 return 0; 3855} 3856 3857static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) 3858{ 3859 struct cnic_dev *dev = csk->dev; 3860 struct cnic_local *cp = dev->cnic_priv; 3861 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; 3862 union l5cm_specific_data l5_data; 3863 u32 cmd = 0; 3864 int close_complete = 0; 3865 3866 switch (opcode) { 3867 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3868 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3869 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3870 if (cnic_ready_to_close(csk, opcode)) { 3871 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3872 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 3873 else 3874 close_complete = 1; 3875 } 3876 break; 3877 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3878 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3879 break; 3880 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3881 close_complete = 1; 3882 break; 3883 } 3884 if (cmd) { 3885 memset(&l5_data, 0, sizeof(l5_data)); 3886 3887 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, 3888 &l5_data); 3889 } else if (close_complete) { 3890 ctx->timestamp = jiffies; 3891 cnic_close_conn(csk); 3892 cnic_cm_upcall(cp, csk, csk->state); 3893 } 3894} 3895 3896static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) 3897{ 3898 struct cnic_local *cp = dev->cnic_priv; 3899 int i; 3900 3901 if (!cp->ctx_tbl) 3902 return; 3903 3904 if (!netif_running(dev->netdev)) 3905 return; 3906 3907 for (i = 0; i < cp->max_cid_space; i++) { 3908 struct cnic_context *ctx = &cp->ctx_tbl[i]; 3909 3910 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 3911 msleep(10); 3912 3913 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 3914 netdev_warn(dev->netdev, "CID %x not deleted\n", 3915 ctx->cid); 3916 } 3917 3918 cancel_delayed_work(&cp->delete_task); 3919 flush_workqueue(cnic_wq); 3920 3921 if (atomic_read(&cp->iscsi_conn) != 0) 3922 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", 3923 atomic_read(&cp->iscsi_conn)); 3924} 3925 3926static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 3927{ 3928 struct cnic_local *cp = dev->cnic_priv; 3929 u32 pfid = cp->pfid; 3930 u32 port = CNIC_PORT(cp); 3931 3932 cnic_init_bnx2x_mac(dev); 3933 cnic_bnx2x_set_tcp_timestamp(dev, 1); 3934 3935 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 3936 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); 3937 3938 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3939 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); 3940 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3941 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), 3942 DEF_MAX_DA_COUNT); 3943 3944 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3945 
XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); 3946 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3947 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); 3948 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3949 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); 3950 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3951 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); 3952 3953 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), 3954 DEF_MAX_CWND); 3955 return 0; 3956} 3957 3958static void cnic_delete_task(struct work_struct *work) 3959{ 3960 struct cnic_local *cp; 3961 struct cnic_dev *dev; 3962 u32 i; 3963 int need_resched = 0; 3964 3965 cp = container_of(work, struct cnic_local, delete_task.work); 3966 dev = cp->dev; 3967 3968 for (i = 0; i < cp->max_cid_space; i++) { 3969 struct cnic_context *ctx = &cp->ctx_tbl[i]; 3970 3971 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || 3972 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 3973 continue; 3974 3975 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 3976 need_resched = 1; 3977 continue; 3978 } 3979 3980 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 3981 continue; 3982 3983 cnic_bnx2x_destroy_ramrod(dev, i); 3984 3985 cnic_free_bnx2x_conn_resc(dev, i); 3986 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) 3987 atomic_dec(&cp->iscsi_conn); 3988 3989 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 3990 } 3991 3992 if (need_resched) 3993 queue_delayed_work(cnic_wq, &cp->delete_task, 3994 msecs_to_jiffies(10)); 3995 3996} 3997 3998static int cnic_cm_open(struct cnic_dev *dev) 3999{ 4000 struct cnic_local *cp = dev->cnic_priv; 4001 int err; 4002 4003 err = cnic_cm_alloc_mem(dev); 4004 if (err) 4005 return err; 4006 4007 err = cp->start_cm(dev); 4008 4009 if (err) 4010 goto err_out; 4011 4012 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); 4013 4014 dev->cm_create = cnic_cm_create; 4015 dev->cm_destroy = cnic_cm_destroy; 4016 dev->cm_connect = cnic_cm_connect; 4017 dev->cm_abort = cnic_cm_abort; 4018 dev->cm_close = cnic_cm_close; 4019 dev->cm_select_dev = cnic_cm_select_dev; 4020 4021 cp->ulp_handle[CNIC_ULP_L4] = dev; 4022 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 4023 return 0; 4024 4025err_out: 4026 cnic_cm_free_mem(dev); 4027 return err; 4028} 4029 4030static int cnic_cm_shutdown(struct cnic_dev *dev) 4031{ 4032 struct cnic_local *cp = dev->cnic_priv; 4033 int i; 4034 4035 cp->stop_cm(dev); 4036 4037 if (!cp->csk_tbl) 4038 return 0; 4039 4040 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 4041 struct cnic_sock *csk = &cp->csk_tbl[i]; 4042 4043 clear_bit(SK_F_INUSE, &csk->flags); 4044 cnic_cm_cleanup(csk); 4045 } 4046 cnic_cm_free_mem(dev); 4047 4048 return 0; 4049} 4050 4051static void cnic_init_context(struct cnic_dev *dev, u32 cid) 4052{ 4053 u32 cid_addr; 4054 int i; 4055 4056 cid_addr = GET_CID_ADDR(cid); 4057 4058 for (i = 0; i < CTX_SIZE; i += 4) 4059 cnic_ctx_wr(dev, cid_addr, i, 0); 4060} 4061 4062static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 4063{ 4064 struct cnic_local *cp = dev->cnic_priv; 4065 int ret = 0, i; 4066 u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 4067 4068 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4069 return 0; 4070 4071 for (i = 0; i < cp->ctx_blks; i++) { 4072 int j; 4073 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 4074 u32 val; 4075 4076 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 4077 4078 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 4079 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 4080 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 4081 (u64) cp->ctx_arr[i].mapping >> 32); 4082 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 4083 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4084 for (j = 0; j < 10; j++) { 4085 4086 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 4087 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 4088 break; 4089 udelay(5); 4090 } 4091 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 4092 ret = -EBUSY; 4093 break; 4094 } 4095 } 4096 return ret; 4097} 4098 4099static void cnic_free_irq(struct cnic_dev *dev) 4100{ 4101 struct cnic_local *cp = dev->cnic_priv; 4102 struct cnic_eth_dev *ethdev = cp->ethdev; 4103 4104 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4105 cp->disable_int_sync(dev); 4106 tasklet_kill(&cp->cnic_irq_task); 4107 free_irq(ethdev->irq_arr[0].vector, dev); 4108 } 4109} 4110 4111static int cnic_request_irq(struct cnic_dev *dev) 4112{ 4113 struct cnic_local *cp = dev->cnic_priv; 4114 struct cnic_eth_dev *ethdev = cp->ethdev; 4115 int err; 4116 4117 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); 4118 if (err) 4119 tasklet_disable(&cp->cnic_irq_task); 4120 4121 return err; 4122} 4123 4124static int cnic_init_bnx2_irq(struct cnic_dev *dev) 4125{ 4126 struct cnic_local *cp = dev->cnic_priv; 4127 struct cnic_eth_dev *ethdev = cp->ethdev; 4128 4129 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4130 int err, i = 0; 4131 int sblk_num = cp->status_blk_num; 4132 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 4133 BNX2_HC_SB_CONFIG_1; 4134 4135 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 4136 4137 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 4138 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 4139 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 4140 4141 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 4142 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 4143 (unsigned long) dev); 4144 err = cnic_request_irq(dev); 4145 if (err) 4146 return err; 4147 4148 while (cp->status_blk.bnx2->status_completion_producer_index && 4149 i < 10) { 4150 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 4151 1 << (11 + sblk_num)); 4152 udelay(10); 4153 i++; 4154 barrier(); 4155 } 4156 if (cp->status_blk.bnx2->status_completion_producer_index) { 4157 cnic_free_irq(dev); 4158 goto failed; 4159 } 4160 4161 } else { 4162 struct status_block *sblk = cp->status_blk.gen; 4163 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 4164 int i = 0; 4165 4166 while (sblk->status_completion_producer_index && i < 10) { 4167 CNIC_WR(dev, BNX2_HC_COMMAND, 4168 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 4169 udelay(10); 4170 i++; 4171 barrier(); 4172 } 4173 if (sblk->status_completion_producer_index) 4174 goto failed; 4175 4176 } 4177 return 0; 4178 4179failed: 4180 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 4181 return -EBUSY; 4182} 4183 4184static void cnic_enable_bnx2_int(struct cnic_dev *dev) 4185{ 4186 struct cnic_local *cp = dev->cnic_priv; 4187 struct cnic_eth_dev *ethdev = cp->ethdev; 4188 4189 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4190 return; 4191 4192 CNIC_WR(dev, 
BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4193 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4194} 4195 4196static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev) 4197{ 4198 u32 max_conn; 4199 4200 max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN); 4201 dev->max_iscsi_conn = max_conn; 4202} 4203 4204static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4205{ 4206 struct cnic_local *cp = dev->cnic_priv; 4207 struct cnic_eth_dev *ethdev = cp->ethdev; 4208 4209 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4210 return; 4211 4212 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4213 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 4214 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 4215 synchronize_irq(ethdev->irq_arr[0].vector); 4216} 4217 4218static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 4219{ 4220 struct cnic_local *cp = dev->cnic_priv; 4221 struct cnic_eth_dev *ethdev = cp->ethdev; 4222 struct cnic_uio_dev *udev = cp->udev; 4223 u32 cid_addr, tx_cid, sb_id; 4224 u32 val, offset0, offset1, offset2, offset3; 4225 int i; 4226 struct tx_bd *txbd; 4227 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4228 struct status_block *s_blk = cp->status_blk.gen; 4229 4230 sb_id = cp->status_blk_num; 4231 tx_cid = 20; 4232 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 4233 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4234 struct status_block_msix *sblk = cp->status_blk.bnx2; 4235 4236 tx_cid = TX_TSS_CID + sb_id - 1; 4237 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 4238 (TX_TSS_CID << 7)); 4239 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 4240 } 4241 cp->tx_cons = *cp->tx_cons_ptr; 4242 4243 cid_addr = GET_CID_ADDR(tx_cid); 4244 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 4245 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 4246 4247 for (i = 0; i < PHY_CTX_SIZE; i += 4) 4248 cnic_ctx_wr(dev, cid_addr2, i, 0); 4249 4250 offset0 = BNX2_L2CTX_TYPE_XI; 4251 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 4252 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 4253 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 4254 } else { 4255 cnic_init_context(dev, tx_cid); 4256 cnic_init_context(dev, tx_cid + 1); 4257 4258 offset0 = BNX2_L2CTX_TYPE; 4259 offset1 = BNX2_L2CTX_CMD_TYPE; 4260 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 4261 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 4262 } 4263 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 4264 cnic_ctx_wr(dev, cid_addr, offset0, val); 4265 4266 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4267 cnic_ctx_wr(dev, cid_addr, offset1, val); 4268 4269 txbd = (struct tx_bd *) udev->l2_ring; 4270 4271 buf_map = udev->l2_buf_map; 4272 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 4273 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 4274 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4275 } 4276 val = (u64) ring_map >> 32; 4277 cnic_ctx_wr(dev, cid_addr, offset2, val); 4278 txbd->tx_bd_haddr_hi = val; 4279 4280 val = (u64) ring_map & 0xffffffff; 4281 cnic_ctx_wr(dev, cid_addr, offset3, val); 4282 txbd->tx_bd_haddr_lo = val; 4283} 4284 4285static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 4286{ 4287 struct cnic_local *cp = dev->cnic_priv; 4288 struct cnic_eth_dev *ethdev = cp->ethdev; 4289 struct cnic_uio_dev *udev = cp->udev; 4290 u32 cid_addr, sb_id, val, coal_reg, coal_val; 4291 int i; 4292 struct rx_bd *rxbd; 4293 struct status_block *s_blk = cp->status_blk.gen; 4294 dma_addr_t ring_map = udev->l2_ring_map; 4295 4296 sb_id = cp->status_blk_num; 4297 cnic_init_context(dev, 2); 4298 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 4299 
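/* Editor's aside: the TX ring setup above and the RX ring setup below both
 * program a 64-bit ring DMA address into the chip as two 32-bit context
 * words. A minimal sketch of that idiom (example_write_ring_addr is a
 * hypothetical helper, not part of this driver):
 */
#if 0
static void example_write_ring_addr(struct cnic_dev *dev, u32 cid_addr,
				    u32 off_hi, u32 off_lo, dma_addr_t map)
{
	/* high 32 bits first, then the low 32 bits, as in
	 * cnic_init_bnx2_tx_ring() above */
	cnic_ctx_wr(dev, cid_addr, off_hi, (u64) map >> 32);
	cnic_ctx_wr(dev, cid_addr, off_lo, (u64) map & 0xffffffff);
}
#endif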
coal_reg = BNX2_HC_COMMAND; 4300 coal_val = CNIC_RD(dev, coal_reg); 4301 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4302 struct status_block_msix *sblk = cp->status_blk.bnx2; 4303 4304 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 4305 coal_reg = BNX2_HC_COALESCE_NOW; 4306 coal_val = 1 << (11 + sb_id); 4307 } 4308 i = 0; 4309 while (*cp->rx_cons_ptr == 0 && i < 10) { 4310 CNIC_WR(dev, coal_reg, coal_val); 4311 udelay(10); 4312 i++; 4313 barrier(); 4314 } 4315 cp->rx_cons = *cp->rx_cons_ptr; 4316 4317 cid_addr = GET_CID_ADDR(2); 4318 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4319 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4320 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 4321 4322 if (sb_id == 0) 4323 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 4324 else 4325 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 4326 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 4327 4328 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE); 4329 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 4330 dma_addr_t buf_map; 4331 int n = (i % cp->l2_rx_ring_size) + 1; 4332 4333 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4334 rxbd->rx_bd_len = cp->l2_single_buf_size; 4335 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 4336 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 4337 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4338 } 4339 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4340 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4341 rxbd->rx_bd_haddr_hi = val; 4342 4343 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4344 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4345 rxbd->rx_bd_haddr_lo = val; 4346 4347 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 4348 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 4349} 4350 4351static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 4352{ 4353 struct kwqe *wqes[1], l2kwqe; 4354 4355 memset(&l2kwqe, 0, sizeof(l2kwqe)); 4356 wqes[0] = &l2kwqe; 4357 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | 4358 (L2_KWQE_OPCODE_VALUE_FLUSH << 4359 KWQE_OPCODE_SHIFT) | 2; 4360 dev->submit_kwqes(dev, wqes, 1); 4361} 4362 4363static void cnic_set_bnx2_mac(struct cnic_dev *dev) 4364{ 4365 struct cnic_local *cp = dev->cnic_priv; 4366 u32 val; 4367 4368 val = cp->func << 2; 4369 4370 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 4371 4372 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4373 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 4374 dev->mac_addr[0] = (u8) (val >> 8); 4375 dev->mac_addr[1] = (u8) val; 4376 4377 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 4378 4379 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4380 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 4381 dev->mac_addr[2] = (u8) (val >> 24); 4382 dev->mac_addr[3] = (u8) (val >> 16); 4383 dev->mac_addr[4] = (u8) (val >> 8); 4384 dev->mac_addr[5] = (u8) val; 4385 4386 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 4387 4388 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 4389 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4390 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 4391 4392 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 4393 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 4394 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 4395} 4396 4397static int cnic_start_bnx2_hw(struct cnic_dev *dev) 4398{ 4399 struct cnic_local *cp = dev->cnic_priv; 4400 struct cnic_eth_dev *ethdev = cp->ethdev; 4401 struct status_block *sblk = cp->status_blk.gen; 4402 u32 val, kcq_cid_addr, kwq_cid_addr; 4403 int err; 4404 4405 
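/* Editor's aside: cnic_set_bnx2_mac() above rebuilds the six-byte iSCSI MAC
 * from two 32-bit shmem words, most-significant byte first. For instance, an
 * ISCSI_MAC_LOWER value of 0x22334455 yields mac_addr[2..5] = 22:33:44:55.
 * A sketch of the unpacking (example_unpack_mac_lower is a hypothetical
 * name, not part of this driver):
 */
#if 0
static void example_unpack_mac_lower(u32 val, u8 *mac_addr)
{
	mac_addr[2] = (u8) (val >> 24);
	mac_addr[3] = (u8) (val >> 16);
	mac_addr[4] = (u8) (val >> 8);
	mac_addr[5] = (u8) val;
}
#endif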
cnic_set_bnx2_mac(dev); 4406 4407 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 4408 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4409 if (BCM_PAGE_BITS > 12) 4410 val |= (12 - 8) << 4; 4411 else 4412 val |= (BCM_PAGE_BITS - 8) << 4; 4413 4414 CNIC_WR(dev, BNX2_MQ_CONFIG, val); 4415 4416 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); 4417 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); 4418 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); 4419 4420 err = cnic_setup_5709_context(dev, 1); 4421 if (err) 4422 return err; 4423 4424 cnic_init_context(dev, KWQ_CID); 4425 cnic_init_context(dev, KCQ_CID); 4426 4427 kwq_cid_addr = GET_CID_ADDR(KWQ_CID); 4428 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; 4429 4430 cp->max_kwq_idx = MAX_KWQ_IDX; 4431 cp->kwq_prod_idx = 0; 4432 cp->kwq_con_idx = 0; 4433 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); 4434 4435 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) 4436 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; 4437 else 4438 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; 4439 4440 /* Initialize the kernel work queue context. */ 4441 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4442 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 4443 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); 4444 4445 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 4446 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 4447 4448 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 4449 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 4450 4451 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 4452 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 4453 4454 val = (u32) cp->kwq_info.pgtbl_map; 4455 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 4456 4457 kcq_cid_addr = GET_CID_ADDR(KCQ_CID); 4458 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; 4459 4460 cp->kcq1.sw_prod_idx = 0; 4461 cp->kcq1.hw_prod_idx_ptr = 4462 (u16 *) &sblk->status_completion_producer_index; 4463 4464 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; 4465 4466 /* Initialize the kernel complete queue context. 
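 * (The sequence mirrors the kernel work queue context setup above: a type
 * word, the self-sequence maximum, the page count, then the page-table DMA
 * address written as separate high and low 32-bit halves.)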
*/ 4467 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4468 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 4469 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); 4470 4471 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 4472 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 4473 4474 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 4475 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 4476 4477 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); 4478 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 4479 4480 val = (u32) cp->kcq1.dma.pgtbl_map; 4481 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 4482 4483 cp->int_num = 0; 4484 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4485 struct status_block_msix *msblk = cp->status_blk.bnx2; 4486 u32 sb_id = cp->status_blk_num; 4487 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 4488 4489 cp->kcq1.hw_prod_idx_ptr = 4490 (u16 *) &msblk->status_completion_producer_index; 4491 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; 4492 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; 4493 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 4494 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 4495 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 4496 } 4497 4498 /* Enable Command Scheduler notification when we write to the 4499 * host producer index of the kernel contexts. */ 4500 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2); 4501 4502 /* Enable Command Scheduler notification when we write to either 4503 * the Send Queue or Receive Queue producer indexes of the kernel 4504 * bypass contexts. */ 4505 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7); 4506 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7); 4507 4508 /* Notify COM when the driver posts an application buffer. */ 4509 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000); 4510 4511 /* Set the CP and COM doorbells. These two processors poll the 4512 * doorbell for a non-zero value before running. This must be done 4513 * after setting up the kernel queue contexts. 
*/ 4514 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); 4515 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); 4516 4517 cnic_init_bnx2_tx_ring(dev); 4518 cnic_init_bnx2_rx_ring(dev); 4519 4520 err = cnic_init_bnx2_irq(dev); 4521 if (err) { 4522 netdev_err(dev->netdev, "cnic_init_irq failed\n"); 4523 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 4524 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 4525 return err; 4526 } 4527 4528 cnic_get_bnx2_iscsi_info(dev); 4529 4530 return 0; 4531} 4532 4533static void cnic_setup_bnx2x_context(struct cnic_dev *dev) 4534{ 4535 struct cnic_local *cp = dev->cnic_priv; 4536 struct cnic_eth_dev *ethdev = cp->ethdev; 4537 u32 start_offset = ethdev->ctx_tbl_offset; 4538 int i; 4539 4540 for (i = 0; i < cp->ctx_blks; i++) { 4541 struct cnic_ctx *ctx = &cp->ctx_arr[i]; 4542 dma_addr_t map = ctx->mapping; 4543 4544 if (cp->ctx_align) { 4545 unsigned long mask = cp->ctx_align - 1; 4546 4547 map = (map + mask) & ~mask; 4548 } 4549 4550 cnic_ctx_tbl_wr(dev, start_offset + i, map); 4551 } 4552} 4553 4554static int cnic_init_bnx2x_irq(struct cnic_dev *dev) 4555{ 4556 struct cnic_local *cp = dev->cnic_priv; 4557 struct cnic_eth_dev *ethdev = cp->ethdev; 4558 int err = 0; 4559 4560 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, 4561 (unsigned long) dev); 4562 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 4563 err = cnic_request_irq(dev); 4564 4565 return err; 4566} 4567 4568static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev, 4569 u16 sb_id, u8 sb_index, 4570 u8 disable) 4571{ 4572 4573 u32 addr = BAR_CSTRORM_INTMEM + 4574 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + 4575 offsetof(struct hc_status_block_data_e1x, index_data) + 4576 sizeof(struct hc_index_data)*sb_index + 4577 offsetof(struct hc_index_data, flags); 4578 u16 flags = CNIC_RD16(dev, addr); 4579 /* clear and set */ 4580 flags &= ~HC_INDEX_DATA_HC_ENABLED; 4581 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) & 4582 HC_INDEX_DATA_HC_ENABLED); 4583 CNIC_WR16(dev, addr, flags); 4584} 4585 4586static void cnic_enable_bnx2x_int(struct cnic_dev *dev) 4587{ 4588 struct cnic_local *cp = dev->cnic_priv; 4589 u8 sb_id = cp->status_blk_num; 4590 4591 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4592 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + 4593 offsetof(struct hc_status_block_data_e1x, index_data) + 4594 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS + 4595 offsetof(struct hc_index_data, timeout), 64 / 12); 4596 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0); 4597} 4598 4599static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) 4600{ 4601} 4602 4603static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, 4604 struct client_init_ramrod_data *data) 4605{ 4606 struct cnic_local *cp = dev->cnic_priv; 4607 struct cnic_uio_dev *udev = cp->udev; 4608 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; 4609 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4610 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4611 int port = CNIC_PORT(cp); 4612 int i; 4613 u32 cli = cp->ethdev->iscsi_l2_client_id; 4614 u32 val; 4615 4616 memset(txbd, 0, BCM_PAGE_SIZE); 4617 4618 buf_map = udev->l2_buf_map; 4619 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4620 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4621 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4622 4623 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4624 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4625 reg_bd->addr_hi = start_bd->addr_hi; 4626 
reg_bd->addr_lo = start_bd->addr_lo + 0x10; 4627 start_bd->nbytes = cpu_to_le16(0x10); 4628 start_bd->nbd = cpu_to_le16(3); 4629 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 4630 start_bd->general_data = (UNICAST_ADDRESS << 4631 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 4632 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4633 4634 } 4635 4636 val = (u64) ring_map >> 32; 4637 txbd->next_bd.addr_hi = cpu_to_le32(val); 4638 4639 data->tx.tx_bd_page_base.hi = cpu_to_le32(val); 4640 4641 val = (u64) ring_map & 0xffffffff; 4642 txbd->next_bd.addr_lo = cpu_to_le32(val); 4643 4644 data->tx.tx_bd_page_base.lo = cpu_to_le32(val); 4645 4646 /* Other ramrod params */ 4647 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; 4648 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; 4649 4650 /* reset xstorm per client statistics */ 4651 if (cli < MAX_STAT_COUNTER_ID) { 4652 val = BAR_XSTRORM_INTMEM + 4653 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4654 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) 4655 CNIC_WR(dev, val + i * 4, 0); 4656 } 4657 4658 cp->tx_cons_ptr = 4659 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; 4660} 4661 4662static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, 4663 struct client_init_ramrod_data *data) 4664{ 4665 struct cnic_local *cp = dev->cnic_priv; 4666 struct cnic_uio_dev *udev = cp->udev; 4667 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4668 BCM_PAGE_SIZE); 4669 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4670 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 4671 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4672 int i; 4673 int port = CNIC_PORT(cp); 4674 u32 cli = cp->ethdev->iscsi_l2_client_id; 4675 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4676 u32 val; 4677 dma_addr_t ring_map = udev->l2_ring_map; 4678 4679 /* General data */ 4680 data->general.client_id = cli; 4681 data->general.statistics_en_flg = 1; 4682 data->general.statistics_counter_id = cli; 4683 data->general.activate_flg = 1; 4684 data->general.sp_client_id = cli; 4685 4686 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 4687 dma_addr_t buf_map; 4688 int n = (i % cp->l2_rx_ring_size) + 1; 4689 4690 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4691 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4692 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4693 } 4694 4695 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4696 rxbd->addr_hi = cpu_to_le32(val); 4697 data->rx.bd_page_base.hi = cpu_to_le32(val); 4698 4699 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4700 rxbd->addr_lo = cpu_to_le32(val); 4701 data->rx.bd_page_base.lo = cpu_to_le32(val); 4702 4703 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4704 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 4705 rxcqe->addr_hi = cpu_to_le32(val); 4706 data->rx.cqe_page_base.hi = cpu_to_le32(val); 4707 4708 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; 4709 rxcqe->addr_lo = cpu_to_le32(val); 4710 data->rx.cqe_page_base.lo = cpu_to_le32(val); 4711 4712 /* Other ramrod params */ 4713 data->rx.client_qzone_id = cl_qzone_id; 4714 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; 4715 data->rx.status_block_id = BNX2X_DEF_SB_ID; 4716 4717 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; 4718 data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size); 4719 4720 data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); 4721 data->rx.outer_vlan_removal_enable_flg = 1; 4722 4723 /* reset tstorm and ustorm per 
client statistics */ 4724 if (cli < MAX_STAT_COUNTER_ID) { 4725 val = BAR_TSTRORM_INTMEM + 4726 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4727 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) 4728 CNIC_WR(dev, val + i * 4, 0); 4729 4730 val = BAR_USTRORM_INTMEM + 4731 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4732 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) 4733 CNIC_WR(dev, val + i * 4, 0); 4734 } 4735 4736 cp->rx_cons_ptr = 4737 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; 4738 cp->rx_cons = *cp->rx_cons_ptr; 4739} 4740 4741static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4742{ 4743 struct cnic_local *cp = dev->cnic_priv; 4744 u32 pfid = cp->pfid; 4745 4746 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 4747 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); 4748 cp->kcq1.sw_prod_idx = 0; 4749 4750 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 4751 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4752 4753 cp->kcq1.hw_prod_idx_ptr = 4754 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4755 cp->kcq1.status_idx_ptr = 4756 &sb->sb.running_index[SM_RX_ID]; 4757 } else { 4758 struct host_hc_status_block_e1x *sb = cp->status_blk.gen; 4759 4760 cp->kcq1.hw_prod_idx_ptr = 4761 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4762 cp->kcq1.status_idx_ptr = 4763 &sb->sb.running_index[SM_RX_ID]; 4764 } 4765 4766 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 4767 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4768 4769 cp->kcq2.io_addr = BAR_USTRORM_INTMEM + 4770 USTORM_FCOE_EQ_PROD_OFFSET(pfid); 4771 cp->kcq2.sw_prod_idx = 0; 4772 cp->kcq2.hw_prod_idx_ptr = 4773 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; 4774 cp->kcq2.status_idx_ptr = 4775 &sb->sb.running_index[SM_RX_ID]; 4776 } 4777} 4778 4779static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4780{ 4781 struct cnic_local *cp = dev->cnic_priv; 4782 struct cnic_eth_dev *ethdev = cp->ethdev; 4783 int func = CNIC_FUNC(cp), ret, i; 4784 u32 pfid; 4785 4786 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 4787 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR); 4788 4789 if (!(val & 1)) 4790 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN); 4791 else 4792 val = (val >> 1) & 1; 4793 4794 if (val) 4795 cp->pfid = func >> 1; 4796 else 4797 cp->pfid = func & 0x6; 4798 } else { 4799 cp->pfid = func; 4800 } 4801 pfid = cp->pfid; 4802 4803 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 4804 cp->iscsi_start_cid); 4805 4806 if (ret) 4807 return -ENOMEM; 4808 4809 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 4810 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, 4811 BNX2X_FCOE_NUM_CONNECTIONS, 4812 cp->fcoe_start_cid); 4813 4814 if (ret) 4815 return -ENOMEM; 4816 } 4817 4818 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; 4819 4820 cnic_init_bnx2x_kcq(dev); 4821 4822 /* Only 1 EQ */ 4823 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4824 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4825 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); 4826 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4827 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), 4828 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); 4829 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4830 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, 4831 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); 4832 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4833 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0), 4834 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); 4835 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4836 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, 4837 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); 4838 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4839 
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); 4840 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4841 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); 4842 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4843 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), 4844 HC_INDEX_ISCSI_EQ_CONS); 4845 4846 for (i = 0; i < cp->conn_buf_info.num_pages; i++) { 4847 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4848 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i), 4849 cp->conn_buf_info.pgtbl[2 * i]); 4850 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4851 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4, 4852 cp->conn_buf_info.pgtbl[(2 * i) + 1]); 4853 } 4854 4855 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4856 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), 4857 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 4858 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4859 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, 4860 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 4861 4862 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4863 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); 4864 4865 cnic_setup_bnx2x_context(dev); 4866 4867 ret = cnic_init_bnx2x_irq(dev); 4868 if (ret) 4869 return ret; 4870 4871 return 0; 4872} 4873 4874static void cnic_init_rings(struct cnic_dev *dev) 4875{ 4876 struct cnic_local *cp = dev->cnic_priv; 4877 struct cnic_uio_dev *udev = cp->udev; 4878 4879 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 4880 return; 4881 4882 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4883 cnic_init_bnx2_tx_ring(dev); 4884 cnic_init_bnx2_rx_ring(dev); 4885 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4886 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4887 u32 cli = cp->ethdev->iscsi_l2_client_id; 4888 u32 cid = cp->ethdev->iscsi_l2_cid; 4889 u32 cl_qzone_id; 4890 struct client_init_ramrod_data *data; 4891 union l5cm_specific_data l5_data; 4892 struct ustorm_eth_rx_producers rx_prods = {0}; 4893 u32 off, i; 4894 4895 rx_prods.bd_prod = 0; 4896 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 4897 barrier(); 4898 4899 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4900 4901 off = BAR_USTRORM_INTMEM + 4902 (BNX2X_CHIP_IS_E2(cp->chip_id) ? 
4903 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : 4904 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli)); 4905 4906 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4907 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4908 4909 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4910 4911 data = udev->l2_buf; 4912 4913 memset(data, 0, sizeof(*data)); 4914 4915 cnic_init_bnx2x_tx_ring(dev, data); 4916 cnic_init_bnx2x_rx_ring(dev, data); 4917 4918 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; 4919 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; 4920 4921 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4922 4923 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4924 cid, ETH_CONNECTION_TYPE, &l5_data); 4925 4926 i = 0; 4927 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4928 ++i < 10) 4929 msleep(1); 4930 4931 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4932 netdev_err(dev->netdev, 4933 "iSCSI CLIENT_SETUP did not complete\n"); 4934 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 4935 cnic_ring_ctl(dev, cid, cli, 1); 4936 } 4937} 4938 4939static void cnic_shutdown_rings(struct cnic_dev *dev) 4940{ 4941 struct cnic_local *cp = dev->cnic_priv; 4942 4943 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 4944 return; 4945 4946 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4947 cnic_shutdown_bnx2_rx_ring(dev); 4948 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4949 struct cnic_local *cp = dev->cnic_priv; 4950 u32 cli = cp->ethdev->iscsi_l2_client_id; 4951 u32 cid = cp->ethdev->iscsi_l2_cid; 4952 union l5cm_specific_data l5_data; 4953 int i; 4954 4955 cnic_ring_ctl(dev, cid, cli, 0); 4956 4957 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4958 4959 l5_data.phy_address.lo = cli; 4960 l5_data.phy_address.hi = 0; 4961 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 4962 cid, ETH_CONNECTION_TYPE, &l5_data); 4963 i = 0; 4964 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4965 ++i < 10) 4966 msleep(1); 4967 4968 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4969 netdev_err(dev->netdev, 4970 "iSCSI CLIENT_HALT did not complete\n"); 4971 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 4972 4973 memset(&l5_data, 0, sizeof(l5_data)); 4974 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 4975 cid, NONE_CONNECTION_TYPE, &l5_data); 4976 msleep(10); 4977 } 4978 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4979} 4980 4981static int cnic_register_netdev(struct cnic_dev *dev) 4982{ 4983 struct cnic_local *cp = dev->cnic_priv; 4984 struct cnic_eth_dev *ethdev = cp->ethdev; 4985 int err; 4986 4987 if (!ethdev) 4988 return -ENODEV; 4989 4990 if (ethdev->drv_state & CNIC_DRV_STATE_REGD) 4991 return 0; 4992 4993 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 4994 if (err) 4995 netdev_err(dev->netdev, "register_cnic failed\n"); 4996 4997 return err; 4998} 4999 5000static void cnic_unregister_netdev(struct cnic_dev *dev) 5001{ 5002 struct cnic_local *cp = dev->cnic_priv; 5003 struct cnic_eth_dev *ethdev = cp->ethdev; 5004 5005 if (!ethdev) 5006 return; 5007 5008 ethdev->drv_unregister_cnic(dev->netdev); 5009} 5010 5011static int cnic_start_hw(struct cnic_dev *dev) 5012{ 5013 struct cnic_local *cp = dev->cnic_priv; 5014 struct cnic_eth_dev *ethdev = cp->ethdev; 5015 int err; 5016 5017 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 5018 return -EALREADY; 5019 5020 dev->regview = ethdev->io_base; 5021 pci_dev_get(dev->pcidev); 
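/* Editor's aside: cnic_init_rings() and cnic_shutdown_rings() above share a
 * submit-then-poll shape: post a ramrod, then wait a bounded time for the
 * completion handler to clear CNIC_LCL_FL_L2_WAIT. Schematically (the helper
 * name is hypothetical, not part of this driver):
 */
#if 0
static int example_wait_l2_ramrod(struct cnic_local *cp)
{
	int i = 0;

	while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
	       ++i < 10)
		msleep(1);

	/* flag still set after ~10 ms: the ramrod never completed */
	return test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) ?
		-EBUSY : 0;
}
#endif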
5022 cp->func = PCI_FUNC(dev->pcidev->devfn); 5023 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 5024 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 5025 5026 err = cp->alloc_resc(dev); 5027 if (err) { 5028 netdev_err(dev->netdev, "allocate resource failure\n"); 5029 goto err1; 5030 } 5031 5032 err = cp->start_hw(dev); 5033 if (err) 5034 goto err1; 5035 5036 err = cnic_cm_open(dev); 5037 if (err) 5038 goto err1; 5039 5040 set_bit(CNIC_F_CNIC_UP, &dev->flags); 5041 5042 cp->enable_int(dev); 5043 5044 return 0; 5045 5046err1: 5047 cp->free_resc(dev); 5048 pci_dev_put(dev->pcidev); 5049 return err; 5050} 5051 5052static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 5053{ 5054 cnic_disable_bnx2_int_sync(dev); 5055 5056 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 5057 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 5058 5059 cnic_init_context(dev, KWQ_CID); 5060 cnic_init_context(dev, KCQ_CID); 5061 5062 cnic_setup_5709_context(dev, 0); 5063 cnic_free_irq(dev); 5064 5065 cnic_free_resc(dev); 5066} 5067 5068 5069static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 5070{ 5071 struct cnic_local *cp = dev->cnic_priv; 5072 5073 cnic_free_irq(dev); 5074 *cp->kcq1.hw_prod_idx_ptr = 0; 5075 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5076 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); 5077 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 5078 cnic_free_resc(dev); 5079} 5080 5081static void cnic_stop_hw(struct cnic_dev *dev) 5082{ 5083 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5084 struct cnic_local *cp = dev->cnic_priv; 5085 int i = 0; 5086 5087 /* Need to wait for the ring shutdown event to complete 5088 * before clearing the CNIC_UP flag. 5089 */ 5090 while (cp->udev->uio_dev != -1 && i < 15) { 5091 msleep(100); 5092 i++; 5093 } 5094 cnic_shutdown_rings(dev); 5095 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 5096 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); 5097 synchronize_rcu(); 5098 cnic_cm_shutdown(dev); 5099 cp->stop_hw(dev); 5100 pci_dev_put(dev->pcidev); 5101 } 5102} 5103 5104static void cnic_free_dev(struct cnic_dev *dev) 5105{ 5106 int i = 0; 5107 5108 while ((atomic_read(&dev->ref_count) != 0) && i < 10) { 5109 msleep(100); 5110 i++; 5111 } 5112 if (atomic_read(&dev->ref_count) != 0) 5113 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); 5114 5115 netdev_info(dev->netdev, "Removed CNIC device\n"); 5116 dev_put(dev->netdev); 5117 kfree(dev); 5118} 5119 5120static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, 5121 struct pci_dev *pdev) 5122{ 5123 struct cnic_dev *cdev; 5124 struct cnic_local *cp; 5125 int alloc_size; 5126 5127 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); 5128 5129 cdev = kzalloc(alloc_size , GFP_KERNEL); 5130 if (cdev == NULL) { 5131 netdev_err(dev, "allocate dev struct failure\n"); 5132 return NULL; 5133 } 5134 5135 cdev->netdev = dev; 5136 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); 5137 cdev->register_device = cnic_register_device; 5138 cdev->unregister_device = cnic_unregister_device; 5139 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; 5140 5141 cp = cdev->cnic_priv; 5142 cp->dev = cdev; 5143 cp->l2_single_buf_size = 0x400; 5144 cp->l2_rx_ring_size = 3; 5145 5146 spin_lock_init(&cp->cnic_ulp_lock); 5147 5148 netdev_info(dev, "Added CNIC device\n"); 5149 5150 return cdev; 5151} 5152 5153static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) 5154{ 5155 struct pci_dev *pdev; 5156 struct cnic_dev *cdev; 5157 struct cnic_local *cp; 5158 struct cnic_eth_dev *ethdev = NULL; 5159 struct cnic_eth_dev 
*(*probe)(struct net_device *) = NULL; 5160 5161 probe = symbol_get(bnx2_cnic_probe); 5162 if (probe) { 5163 ethdev = (*probe)(dev); 5164 symbol_put(bnx2_cnic_probe); 5165 } 5166 if (!ethdev) 5167 return NULL; 5168 5169 pdev = ethdev->pdev; 5170 if (!pdev) 5171 return NULL; 5172 5173 dev_hold(dev); 5174 pci_dev_get(pdev); 5175 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 || 5176 pdev->device == PCI_DEVICE_ID_NX2_5709S) && 5177 (pdev->revision < 0x10)) { 5178 pci_dev_put(pdev); 5179 goto cnic_err; 5180 } 5181 pci_dev_put(pdev); 5182 5183 cdev = cnic_alloc_dev(dev, pdev); 5184 if (cdev == NULL) 5185 goto cnic_err; 5186 5187 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); 5188 cdev->submit_kwqes = cnic_submit_bnx2_kwqes; 5189 5190 cp = cdev->cnic_priv; 5191 cp->ethdev = ethdev; 5192 cdev->pcidev = pdev; 5193 cp->chip_id = ethdev->chip_id; 5194 5195 cp->cnic_ops = &cnic_bnx2_ops; 5196 cp->start_hw = cnic_start_bnx2_hw; 5197 cp->stop_hw = cnic_stop_bnx2_hw; 5198 cp->setup_pgtbl = cnic_setup_page_tbl; 5199 cp->alloc_resc = cnic_alloc_bnx2_resc; 5200 cp->free_resc = cnic_free_resc; 5201 cp->start_cm = cnic_cm_init_bnx2_hw; 5202 cp->stop_cm = cnic_cm_stop_bnx2_hw; 5203 cp->enable_int = cnic_enable_bnx2_int; 5204 cp->disable_int_sync = cnic_disable_bnx2_int_sync; 5205 cp->close_conn = cnic_close_bnx2_conn; 5206 cp->next_idx = cnic_bnx2_next_idx; 5207 cp->hw_idx = cnic_bnx2_hw_idx; 5208 return cdev; 5209 5210cnic_err: 5211 dev_put(dev); 5212 return NULL; 5213} 5214 5215static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) 5216{ 5217 struct pci_dev *pdev; 5218 struct cnic_dev *cdev; 5219 struct cnic_local *cp; 5220 struct cnic_eth_dev *ethdev = NULL; 5221 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 5222 5223 probe = symbol_get(bnx2x_cnic_probe); 5224 if (probe) { 5225 ethdev = (*probe)(dev); 5226 symbol_put(bnx2x_cnic_probe); 5227 } 5228 if (!ethdev) 5229 return NULL; 5230 5231 pdev = ethdev->pdev; 5232 if (!pdev) 5233 return NULL; 5234 5235 dev_hold(dev); 5236 cdev = cnic_alloc_dev(dev, pdev); 5237 if (cdev == NULL) { 5238 dev_put(dev); 5239 return NULL; 5240 } 5241 5242 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); 5243 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; 5244 5245 cp = cdev->cnic_priv; 5246 cp->ethdev = ethdev; 5247 cdev->pcidev = pdev; 5248 cp->chip_id = ethdev->chip_id; 5249 5250 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5251 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5252 if (BNX2X_CHIP_IS_E2(cp->chip_id) && 5253 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) 5254 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5255 5256 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); 5257 5258 cp->cnic_ops = &cnic_bnx2x_ops; 5259 cp->start_hw = cnic_start_bnx2x_hw; 5260 cp->stop_hw = cnic_stop_bnx2x_hw; 5261 cp->setup_pgtbl = cnic_setup_page_tbl_le; 5262 cp->alloc_resc = cnic_alloc_bnx2x_resc; 5263 cp->free_resc = cnic_free_resc; 5264 cp->start_cm = cnic_cm_init_bnx2x_hw; 5265 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5266 cp->enable_int = cnic_enable_bnx2x_int; 5267 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5268 if (BNX2X_CHIP_IS_E2(cp->chip_id)) 5269 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5270 else 5271 cp->ack_int = cnic_ack_bnx2x_msix; 5272 cp->close_conn = cnic_close_bnx2x_conn; 5273 cp->next_idx = cnic_bnx2x_next_idx; 5274 cp->hw_idx = cnic_bnx2x_hw_idx; 5275 return cdev; 5276} 5277 5278static struct cnic_dev *is_cnic_dev(struct net_device *dev) 5279{ 5280 struct ethtool_drvinfo drvinfo; 5281 struct cnic_dev *cdev = NULL; 5282 5283 if (dev->ethtool_ops && 
dev->ethtool_ops->get_drvinfo) { 5284 memset(&drvinfo, 0, sizeof(drvinfo)); 5285 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 5286 5287 if (!strcmp(drvinfo.driver, "bnx2")) 5288 cdev = init_bnx2_cnic(dev); 5289 if (!strcmp(drvinfo.driver, "bnx2x")) 5290 cdev = init_bnx2x_cnic(dev); 5291 if (cdev) { 5292 write_lock(&cnic_dev_lock); 5293 list_add(&cdev->list, &cnic_dev_list); 5294 write_unlock(&cnic_dev_lock); 5295 } 5296 } 5297 return cdev; 5298} 5299 5300/** 5301 * netdev event handler 5302 */ 5303static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5304 void *ptr) 5305{ 5306 struct net_device *netdev = ptr; 5307 struct cnic_dev *dev; 5308 int if_type; 5309 int new_dev = 0; 5310 5311 dev = cnic_from_netdev(netdev); 5312 5313 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) { 5314 /* Check for the hot-plug device */ 5315 dev = is_cnic_dev(netdev); 5316 if (dev) { 5317 new_dev = 1; 5318 cnic_hold(dev); 5319 } 5320 } 5321 if (dev) { 5322 struct cnic_local *cp = dev->cnic_priv; 5323 5324 if (new_dev) 5325 cnic_ulp_init(dev); 5326 else if (event == NETDEV_UNREGISTER) 5327 cnic_ulp_exit(dev); 5328 5329 if (event == NETDEV_UP) { 5330 if (cnic_register_netdev(dev) != 0) { 5331 cnic_put(dev); 5332 goto done; 5333 } 5334 if (!cnic_start_hw(dev)) 5335 cnic_ulp_start(dev); 5336 } 5337 5338 rcu_read_lock(); 5339 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5340 struct cnic_ulp_ops *ulp_ops; 5341 void *ctx; 5342 5343 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5344 if (!ulp_ops || !ulp_ops->indicate_netevent) 5345 continue; 5346 5347 ctx = cp->ulp_handle[if_type]; 5348 5349 ulp_ops->indicate_netevent(ctx, event); 5350 } 5351 rcu_read_unlock(); 5352 5353 if (event == NETDEV_GOING_DOWN) { 5354 cnic_ulp_stop(dev); 5355 cnic_stop_hw(dev); 5356 cnic_unregister_netdev(dev); 5357 } else if (event == NETDEV_UNREGISTER) { 5358 write_lock(&cnic_dev_lock); 5359 list_del_init(&dev->list); 5360 write_unlock(&cnic_dev_lock); 5361 5362 cnic_put(dev); 5363 cnic_free_dev(dev); 5364 goto done; 5365 } 5366 cnic_put(dev); 5367 } 5368done: 5369 return NOTIFY_DONE; 5370} 5371 5372static struct notifier_block cnic_netdev_notifier = { 5373 .notifier_call = cnic_netdev_event 5374}; 5375 5376static void cnic_release(void) 5377{ 5378 struct cnic_dev *dev; 5379 struct cnic_uio_dev *udev; 5380 5381 while (!list_empty(&cnic_dev_list)) { 5382 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); 5383 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5384 cnic_ulp_stop(dev); 5385 cnic_stop_hw(dev); 5386 } 5387 5388 cnic_ulp_exit(dev); 5389 cnic_unregister_netdev(dev); 5390 list_del_init(&dev->list); 5391 cnic_free_dev(dev); 5392 } 5393 while (!list_empty(&cnic_udev_list)) { 5394 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, 5395 list); 5396 cnic_free_uio(udev); 5397 } 5398} 5399 5400static int __init cnic_init(void) 5401{ 5402 int rc = 0; 5403 5404 pr_info("%s", version); 5405 5406 rc = register_netdevice_notifier(&cnic_netdev_notifier); 5407 if (rc) { 5408 cnic_release(); 5409 return rc; 5410 } 5411 5412 cnic_wq = create_singlethread_workqueue("cnic_wq"); 5413 if (!cnic_wq) { 5414 cnic_release(); 5415 unregister_netdevice_notifier(&cnic_netdev_notifier); 5416 return -ENOMEM; 5417 } 5418 5419 return 0; 5420} 5421 5422static void __exit cnic_exit(void) 5423{ 5424 unregister_netdevice_notifier(&cnic_netdev_notifier); 5425 cnic_release(); 5426 destroy_workqueue(cnic_wq); 5427} 5428 5429module_init(cnic_init); 5430module_exit(cnic_exit);
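/* Editor's aside: cnic_stop_hw() above waits for udev->uio_dev to return to
 * -1, i.e. for the userspace UIO consumer (e.g. the iSCSI interface daemon)
 * to close the device before the rings are shut down. The userspace half of
 * that handshake is the standard UIO open/read/close pattern. Standalone
 * sketch, not driver code; the /dev/uio0 path is an assumption:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned int event_count;
	int fd = open("/dev/uio0", O_RDWR);	/* driver's UIO open handler
						 * marks the device busy */
	if (fd < 0)
		return 1;

	/* each read() blocks until the driver signals a new event and
	 * returns a 4-byte cumulative event count */
	if (read(fd, &event_count, sizeof(event_count)) !=
	    sizeof(event_count)) {
		close(fd);
		return 1;
	}

	close(fd);	/* releases the device so cnic_stop_hw() can proceed */
	return 0;
}
#endif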