Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/cnic.c at v2.6.37-rc4 (4974 lines, 129 kB)

/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}
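
/*
 * Reference counting helpers: cnic_hold()/cnic_put() pin a cnic_dev, and
 * csk_hold()/csk_put() below do the same for a per-connection cnic_sock.
 * These are bare atomic counters; the final put does not free the object.
 * Owners that need to tear down wait for the count to reach zero instead.
 */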
static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
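
/*
 * Userspace path resolution: cnic keeps no neighbour/route state of its
 * own, so cnic_send_nlmsg() asks the iSCSI netlink interface to resolve a
 * path (next-hop MAC, source IP) for a connection, and
 * cnic_iscsi_nl_msg_recv() consumes the ISCSI_UEVENT_PATH_UPDATE answer,
 * copying the resolved addresses into the cnic_sock before kicking off
 * the PG offload via cnic_cm_set_pg().
 */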
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
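
/*
 * ULP (upper layer protocol) registration.  cnic_register_driver() and
 * cnic_unregister_driver() are the exported entry points used by protocol
 * drivers such as bnx2i to hook into every cnic device; the per-device
 * cnic_register_device()/cnic_unregister_device() callbacks further down
 * bind a ULP instance to a single device.
 */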
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);
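
	/*
	 * If the hardware is already up, start the ULP on this device now;
	 * otherwise the start is deferred until the device comes up.
	 * ULP_F_START guards against starting the same ULP twice.
	 */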
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
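
/*
 * The page tables below describe a multi-page cnic_dma area to the chip:
 * one 64-bit bus address per page, stored as two 32-bit words.  The two
 * variants differ only in word order (big endian vs little endian);
 * device setup points cp->setup_pgtbl, called from cnic_alloc_dma(), at
 * whichever format the chip family expects.
 */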
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
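
/*
 * On the 5709, connection context memory is held in host pages rather
 * than on-chip memory.  The BNX2_PG_CTX_MAP and BNX2_ISCSI_CTX_MAP
 * registers give the CID ranges to cover; each BCM_PAGE_SIZE block holds
 * cids_per_blk contexts of 128 bytes.
 */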
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		return -ENOMEM;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
}
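
/*
 * cnic_init_uio() exports four memory regions through the UIO framework
 * (registers, status block, L2 ring, L2 buffers) so a userspace agent,
 * e.g. Broadcom's iscsiuio, can drive the L2 path directly.  mem[0] maps
 * the device BAR physically; the rest are kernel-logical mappings.
 */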
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
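
/*
 * bnx2x resource setup: a cnic_context table sized for the iSCSI CID
 * range, one 16-byte KWQE data slot per context, the KCQ, the per-PF
 * connection and global buffers, host context memory, and the UIO rings.
 * Any failure is unwound through cnic_free_resc().
 */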
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
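
/*
 * bnx2x work queue entries are 16-byte slow-path entries that carry only
 * a command header plus the bus address of a per-connection data buffer.
 * cnic_get_kwqe_16_data() hands out that buffer and fills in its DMA
 * address; cnic_submit_kwqe_16() wraps the command in an l5cm_spe and
 * pushes it through the ethernet driver's drv_submit_kwqes_16() hook.
 */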
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
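
	/*
	 * Per-connection queues, sized from the INIT1 parameters above:
	 * the task array, the R2T queue, and the host queue (HQ).  Each is
	 * a page-table-backed DMA area so the chip can chase the pages.
	 */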
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}
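
/*
 * OFFLOAD_CONN1 arrives as a variable-length train of work queue entries:
 * offload1, offload2, then num_additional_wqes offload3 entries.  *work
 * reports back how many entries were consumed so the dispatcher in
 * cnic_submit_bnx2x_kwqes() can advance past the whole request.  Failures
 * are reported to the ULP as a completion KCQE rather than an errno.
 */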
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}


static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid, type;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
		& SPE_HDR_CONN_TYPE;
	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, type, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
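
/*
 * Fills the per-connection "storm" buffers for a TCP connect request.
 * Both IPv4 and IPv6 addresses travel through an in6_addr here (the
 * unused words are zero for IPv4), and the TCP pseudo-header checksum is
 * precomputed with csum_ipv6_magic(), presumably so the firmware need
 * not redo it.
 */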
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
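
/*
 * L4 connect handler: consumes two KWQEs for IPv4 or three for IPv6
 * (req2 carries the upper IPv6 address words), builds the active
 * connection buffer in the per-connection KWQE-16 data slot, and fires
 * the TCP_CONNECT ramrod.
 */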
1943static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 1944 u32 num, int *work) 1945{ 1946 struct cnic_local *cp = dev->cnic_priv; 1947 struct l4_kwq_connect_req1 *kwqe1 = 1948 (struct l4_kwq_connect_req1 *) wqes[0]; 1949 struct l4_kwq_connect_req3 *kwqe3; 1950 struct l5cm_active_conn_buffer *conn_buf; 1951 struct l5cm_conn_addr_params *conn_addr; 1952 union l5cm_specific_data l5_data; 1953 u32 l5_cid = kwqe1->pg_cid; 1954 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 1955 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1956 int ret; 1957 1958 if (num < 2) { 1959 *work = num; 1960 return -EINVAL; 1961 } 1962 1963 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) 1964 *work = 3; 1965 else 1966 *work = 2; 1967 1968 if (num < *work) { 1969 *work = num; 1970 return -EINVAL; 1971 } 1972 1973 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 1974 netdev_err(dev->netdev, "conn_buf size too big\n"); 1975 return -ENOMEM; 1976 } 1977 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 1978 if (!conn_buf) 1979 return -ENOMEM; 1980 1981 memset(conn_buf, 0, sizeof(*conn_buf)); 1982 1983 conn_addr = &conn_buf->conn_addr_buf; 1984 conn_addr->remote_addr_0 = csk->ha[0]; 1985 conn_addr->remote_addr_1 = csk->ha[1]; 1986 conn_addr->remote_addr_2 = csk->ha[2]; 1987 conn_addr->remote_addr_3 = csk->ha[3]; 1988 conn_addr->remote_addr_4 = csk->ha[4]; 1989 conn_addr->remote_addr_5 = csk->ha[5]; 1990 1991 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { 1992 struct l4_kwq_connect_req2 *kwqe2 = 1993 (struct l4_kwq_connect_req2 *) wqes[1]; 1994 1995 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; 1996 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; 1997 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; 1998 1999 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; 2000 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; 2001 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; 2002 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; 2003 } 2004 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; 2005 2006 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; 2007 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; 2008 conn_addr->local_tcp_port = kwqe1->src_port; 2009 conn_addr->remote_tcp_port = kwqe1->dst_port; 2010 2011 conn_addr->pmtu = kwqe3->pmtu; 2012 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2013 2014 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2015 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); 2016 2017 cnic_bnx2x_set_tcp_timestamp(dev, 2018 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); 2019 2020 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2021 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2022 if (!ret) 2023 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2024 2025 return ret; 2026} 2027 2028static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 2029{ 2030 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 2031 union l5cm_specific_data l5_data; 2032 int ret; 2033 2034 memset(&l5_data, 0, sizeof(l5_data)); 2035 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 2036 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2037 return ret; 2038} 2039 2040static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 2041{ 2042 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 2043 union l5cm_specific_data l5_data; 2044 int ret; 2045 2046 memset(&l5_data, 0, sizeof(l5_data)); 2047 ret = cnic_submit_kwqe_16(dev, 
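/* Editor's note: cnic_bnx2x_connect() above consumes two KWQEs for an
 * IPv4 request and three for IPv6 (the middle one carries the upper
 * address words), and reports the count through *work so the dispatch
 * loop in cnic_submit_bnx2x_kwqes() below can advance i += work past
 * multi-KWQE commands. */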
L5CM_RAMROD_CMD_ID_ABORT, 2048 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2049 return ret; 2050} 2051static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2052{ 2053 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 2054 struct l4_kcq kcqe; 2055 struct kcqe *cqes[1]; 2056 2057 memset(&kcqe, 0, sizeof(kcqe)); 2058 kcqe.pg_host_opaque = req->host_opaque; 2059 kcqe.pg_cid = req->host_opaque; 2060 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 2061 cqes[0] = (struct kcqe *) &kcqe; 2062 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2063 return 0; 2064} 2065 2066static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2067{ 2068 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; 2069 struct l4_kcq kcqe; 2070 struct kcqe *cqes[1]; 2071 2072 memset(&kcqe, 0, sizeof(kcqe)); 2073 kcqe.pg_host_opaque = req->pg_host_opaque; 2074 kcqe.pg_cid = req->pg_cid; 2075 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 2076 cqes[0] = (struct kcqe *) &kcqe; 2077 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2078 return 0; 2079} 2080 2081static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 2082 u32 num_wqes) 2083{ 2084 int i, work, ret; 2085 u32 opcode; 2086 struct kwqe *kwqe; 2087 2088 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2089 return -EAGAIN; /* bnx2x is down */ 2090 2091 for (i = 0; i < num_wqes; ) { 2092 kwqe = wqes[i]; 2093 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2094 work = 1; 2095 2096 switch (opcode) { 2097 case ISCSI_KWQE_OPCODE_INIT1: 2098 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 2099 break; 2100 case ISCSI_KWQE_OPCODE_INIT2: 2101 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 2102 break; 2103 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2104 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2105 num_wqes - i, &work); 2106 break; 2107 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2108 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2109 break; 2110 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2111 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2112 break; 2113 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2114 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2115 &work); 2116 break; 2117 case L4_KWQE_OPCODE_VALUE_CLOSE: 2118 ret = cnic_bnx2x_close(dev, kwqe); 2119 break; 2120 case L4_KWQE_OPCODE_VALUE_RESET: 2121 ret = cnic_bnx2x_reset(dev, kwqe); 2122 break; 2123 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2124 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2125 break; 2126 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2127 ret = cnic_bnx2x_update_pg(dev, kwqe); 2128 break; 2129 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2130 ret = 0; 2131 break; 2132 default: 2133 ret = 0; 2134 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2135 opcode); 2136 break; 2137 } 2138 if (ret < 0) 2139 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2140 opcode); 2141 i += work; 2142 } 2143 return 0; 2144} 2145 2146static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2147{ 2148 struct cnic_local *cp = dev->cnic_priv; 2149 int i, j, comp = 0; 2150 2151 i = 0; 2152 j = 1; 2153 while (num_cqes) { 2154 struct cnic_ulp_ops *ulp_ops; 2155 int ulp_type; 2156 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2157 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK; 2158 2159 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2160 comp++; 2161 2162 while (j < num_cqes) { 2163 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2164 2165 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer) 2166 break; 2167 2168 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2169 comp++; 2170 
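/* Editor's note: this inner scan grows the batch [i, i + j) for as long
 * as consecutive KCQEs carry the same layer bits, so each ULP gets one
 * indicate_kcqes() call per contiguous run rather than one per KCQE;
 * ramrod completions are counted along the way and their SPQ credits
 * are returned in a single cnic_spq_completion() call at the end. */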
j++; 2171 } 2172 2173 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2174 ulp_type = CNIC_ULP_RDMA; 2175 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2176 ulp_type = CNIC_ULP_ISCSI; 2177 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2178 ulp_type = CNIC_ULP_L4; 2179 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2180 goto end; 2181 else { 2182 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2183 kcqe_op_flag); 2184 goto end; 2185 } 2186 2187 rcu_read_lock(); 2188 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2189 if (likely(ulp_ops)) { 2190 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2191 cp->completed_kcq + i, j); 2192 } 2193 rcu_read_unlock(); 2194end: 2195 num_cqes -= j; 2196 i += j; 2197 j = 1; 2198 } 2199 if (unlikely(comp)) 2200 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); 2201} 2202 2203static u16 cnic_bnx2_next_idx(u16 idx) 2204{ 2205 return idx + 1; 2206} 2207 2208static u16 cnic_bnx2_hw_idx(u16 idx) 2209{ 2210 return idx; 2211} 2212 2213static u16 cnic_bnx2x_next_idx(u16 idx) 2214{ 2215 idx++; 2216 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2217 idx++; 2218 2219 return idx; 2220} 2221 2222static u16 cnic_bnx2x_hw_idx(u16 idx) 2223{ 2224 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2225 idx++; 2226 return idx; 2227} 2228 2229static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) 2230{ 2231 struct cnic_local *cp = dev->cnic_priv; 2232 u16 i, ri, hw_prod, last; 2233 struct kcqe *kcqe; 2234 int kcqe_cnt = 0, last_cnt = 0; 2235 2236 i = ri = last = info->sw_prod_idx; 2237 ri &= MAX_KCQ_IDX; 2238 hw_prod = *info->hw_prod_idx_ptr; 2239 hw_prod = cp->hw_idx(hw_prod); 2240 2241 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2242 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2243 cp->completed_kcq[kcqe_cnt++] = kcqe; 2244 i = cp->next_idx(i); 2245 ri = i & MAX_KCQ_IDX; 2246 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2247 last_cnt = kcqe_cnt; 2248 last = i; 2249 } 2250 } 2251 2252 info->sw_prod_idx = last; 2253 return last_cnt; 2254} 2255 2256static int cnic_l2_completion(struct cnic_local *cp) 2257{ 2258 u16 hw_cons, sw_cons; 2259 struct cnic_uio_dev *udev = cp->udev; 2260 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2261 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 2262 u32 cmd; 2263 int comp = 0; 2264 2265 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2266 return 0; 2267 2268 hw_cons = *cp->rx_cons_ptr; 2269 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2270 hw_cons++; 2271 2272 sw_cons = cp->rx_cons; 2273 while (sw_cons != hw_cons) { 2274 u8 cqe_fp_flags; 2275 2276 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2277 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2278 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2279 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2280 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2281 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2282 cmd == RAMROD_CMD_ID_ETH_HALT) 2283 comp++; 2284 } 2285 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2286 } 2287 return comp; 2288} 2289 2290static void cnic_chk_pkt_rings(struct cnic_local *cp) 2291{ 2292 u16 rx_cons, tx_cons; 2293 int comp = 0; 2294 2295 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 2296 return; 2297 2298 rx_cons = *cp->rx_cons_ptr; 2299 tx_cons = *cp->tx_cons_ptr; 2300 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2301 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2302 comp = cnic_l2_completion(cp); 2303 2304 cp->tx_cons = tx_cons; 2305 
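/* Editor's worked example for cnic_bnx2x_next_idx()/cnic_bnx2x_hw_idx()
 * above, assuming MAX_KCQE_CNT is 127, i.e. the last KCQE slot of each
 * page holds the next-page pointer: next_idx(125) == 126, but
 * next_idx(126) == 128, because (127 & 127) == 127 marks the reserved
 * slot; hw_idx() applies the same skip to producer values read from
 * hardware. */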
cp->rx_cons = rx_cons; 2306 2307 if (cp->udev) 2308 uio_event_notify(&cp->udev->cnic_uinfo); 2309 } 2310 if (comp) 2311 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2312} 2313 2314static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) 2315{ 2316 struct cnic_local *cp = dev->cnic_priv; 2317 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2318 int kcqe_cnt; 2319 2320 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2321 2322 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2323 2324 service_kcqes(dev, kcqe_cnt); 2325 2326 /* Tell compiler that status_blk fields can change. */ 2327 barrier(); 2328 if (status_idx != *cp->kcq1.status_idx_ptr) { 2329 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2330 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2331 } else 2332 break; 2333 } 2334 2335 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); 2336 2337 cnic_chk_pkt_rings(cp); 2338 2339 return status_idx; 2340} 2341 2342static int cnic_service_bnx2(void *data, void *status_blk) 2343{ 2344 struct cnic_dev *dev = data; 2345 struct cnic_local *cp = dev->cnic_priv; 2346 u32 status_idx = *cp->kcq1.status_idx_ptr; 2347 2348 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2349 return status_idx; 2350 2351 return cnic_service_bnx2_queues(dev); 2352} 2353 2354static void cnic_service_bnx2_msix(unsigned long data) 2355{ 2356 struct cnic_dev *dev = (struct cnic_dev *) data; 2357 struct cnic_local *cp = dev->cnic_priv; 2358 2359 cp->last_status_idx = cnic_service_bnx2_queues(dev); 2360 2361 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2362 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2363} 2364 2365static void cnic_doirq(struct cnic_dev *dev) 2366{ 2367 struct cnic_local *cp = dev->cnic_priv; 2368 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; 2369 2370 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2371 prefetch(cp->status_blk.gen); 2372 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2373 2374 tasklet_schedule(&cp->cnic_irq_task); 2375 } 2376} 2377 2378static irqreturn_t cnic_irq(int irq, void *dev_instance) 2379{ 2380 struct cnic_dev *dev = dev_instance; 2381 struct cnic_local *cp = dev->cnic_priv; 2382 2383 if (cp->ack_int) 2384 cp->ack_int(dev); 2385 2386 cnic_doirq(dev); 2387 2388 return IRQ_HANDLED; 2389} 2390 2391static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 2392 u16 index, u8 op, u8 update) 2393{ 2394 struct cnic_local *cp = dev->cnic_priv; 2395 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 2396 COMMAND_REG_INT_ACK); 2397 struct igu_ack_register igu_ack; 2398 2399 igu_ack.status_block_index = index; 2400 igu_ack.sb_id_and_flags = 2401 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 2402 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 2403 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 2404 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 2405 2406 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 2407} 2408 2409static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, 2410 u16 index, u8 op, u8 update) 2411{ 2412 struct igu_regular cmd_data; 2413 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 2414 2415 cmd_data.sb_id_and_flags = 2416 (index << IGU_REGULAR_SB_INDEX_SHIFT) | 2417 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 2418 (update << IGU_REGULAR_BUPDATE_SHIFT) | 2419 (op << IGU_REGULAR_ENABLE_INT_SHIFT); 2420 2421 2422 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); 2423} 2424 2425static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 2426{ 2427 struct cnic_local *cp = 
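/* Editor's note on cnic_service_bnx2_queues() above: the status index
 * is sampled before draining the KCQ and re-checked after each pass
 * (barrier() forces the compiler to re-read the status block); the
 * loop only exits once a pass completes with the index unchanged, so
 * completions that land while the queue is being drained are not
 * lost. */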
dev->cnic_priv; 2428 2429 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, 2430 IGU_INT_DISABLE, 0); 2431} 2432 2433static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) 2434{ 2435 struct cnic_local *cp = dev->cnic_priv; 2436 2437 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, 2438 IGU_INT_DISABLE, 0); 2439} 2440 2441static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 2442{ 2443 u32 last_status = *info->status_idx_ptr; 2444 int kcqe_cnt; 2445 2446 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2447 2448 service_kcqes(dev, kcqe_cnt); 2449 2450 /* Tell compiler that sblk fields can change. */ 2451 barrier(); 2452 if (last_status == *info->status_idx_ptr) 2453 break; 2454 2455 last_status = *info->status_idx_ptr; 2456 } 2457 return last_status; 2458} 2459 2460static void cnic_service_bnx2x_bh(unsigned long data) 2461{ 2462 struct cnic_dev *dev = (struct cnic_dev *) data; 2463 struct cnic_local *cp = dev->cnic_priv; 2464 u32 status_idx; 2465 2466 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2467 return; 2468 2469 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2470 2471 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2472 if (BNX2X_CHIP_IS_E2(cp->chip_id)) 2473 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2474 status_idx, IGU_INT_ENABLE, 1); 2475 else 2476 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2477 status_idx, IGU_INT_ENABLE, 1); 2478} 2479 2480static int cnic_service_bnx2x(void *data, void *status_blk) 2481{ 2482 struct cnic_dev *dev = data; 2483 struct cnic_local *cp = dev->cnic_priv; 2484 2485 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 2486 cnic_doirq(dev); 2487 2488 cnic_chk_pkt_rings(cp); 2489 2490 return 0; 2491} 2492 2493static void cnic_ulp_stop(struct cnic_dev *dev) 2494{ 2495 struct cnic_local *cp = dev->cnic_priv; 2496 int if_type; 2497 2498 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 2499 2500 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2501 struct cnic_ulp_ops *ulp_ops; 2502 2503 mutex_lock(&cnic_lock); 2504 ulp_ops = cp->ulp_ops[if_type]; 2505 if (!ulp_ops) { 2506 mutex_unlock(&cnic_lock); 2507 continue; 2508 } 2509 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2510 mutex_unlock(&cnic_lock); 2511 2512 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2513 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 2514 2515 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2516 } 2517} 2518 2519static void cnic_ulp_start(struct cnic_dev *dev) 2520{ 2521 struct cnic_local *cp = dev->cnic_priv; 2522 int if_type; 2523 2524 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2525 struct cnic_ulp_ops *ulp_ops; 2526 2527 mutex_lock(&cnic_lock); 2528 ulp_ops = cp->ulp_ops[if_type]; 2529 if (!ulp_ops || !ulp_ops->cnic_start) { 2530 mutex_unlock(&cnic_lock); 2531 continue; 2532 } 2533 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2534 mutex_unlock(&cnic_lock); 2535 2536 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2537 ulp_ops->cnic_start(cp->ulp_handle[if_type]); 2538 2539 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2540 } 2541} 2542 2543static int cnic_ctl(void *data, struct cnic_ctl_info *info) 2544{ 2545 struct cnic_dev *dev = data; 2546 2547 switch (info->cmd) { 2548 case CNIC_CTL_STOP_CMD: 2549 cnic_hold(dev); 2550 2551 cnic_ulp_stop(dev); 2552 cnic_stop_hw(dev); 2553 2554 cnic_put(dev); 2555 break; 2556 case CNIC_CTL_START_CMD: 2557 cnic_hold(dev); 2558 2559 if 
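/* Editor's note: cnic_ulp_start()/cnic_ulp_stop() above set
 * ULP_F_CALL_PENDING around the cnic_start/cnic_stop callbacks so the
 * callbacks can run outside cnic_lock; presumably a teardown path can
 * then poll this flag to wait out an in-flight callback. */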
(!cnic_start_hw(dev)) 2560 cnic_ulp_start(dev); 2561 2562 cnic_put(dev); 2563 break; 2564 case CNIC_CTL_COMPLETION_CMD: { 2565 u32 cid = BNX2X_SW_CID(info->data.comp.cid); 2566 u32 l5_cid; 2567 struct cnic_local *cp = dev->cnic_priv; 2568 2569 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 2570 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2571 2572 ctx->wait_cond = 1; 2573 wake_up(&ctx->waitq); 2574 } 2575 break; 2576 } 2577 default: 2578 return -EINVAL; 2579 } 2580 return 0; 2581} 2582 2583static void cnic_ulp_init(struct cnic_dev *dev) 2584{ 2585 int i; 2586 struct cnic_local *cp = dev->cnic_priv; 2587 2588 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 2589 struct cnic_ulp_ops *ulp_ops; 2590 2591 mutex_lock(&cnic_lock); 2592 ulp_ops = cnic_ulp_tbl[i]; 2593 if (!ulp_ops || !ulp_ops->cnic_init) { 2594 mutex_unlock(&cnic_lock); 2595 continue; 2596 } 2597 ulp_get(ulp_ops); 2598 mutex_unlock(&cnic_lock); 2599 2600 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 2601 ulp_ops->cnic_init(dev); 2602 2603 ulp_put(ulp_ops); 2604 } 2605} 2606 2607static void cnic_ulp_exit(struct cnic_dev *dev) 2608{ 2609 int i; 2610 struct cnic_local *cp = dev->cnic_priv; 2611 2612 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 2613 struct cnic_ulp_ops *ulp_ops; 2614 2615 mutex_lock(&cnic_lock); 2616 ulp_ops = cnic_ulp_tbl[i]; 2617 if (!ulp_ops || !ulp_ops->cnic_exit) { 2618 mutex_unlock(&cnic_lock); 2619 continue; 2620 } 2621 ulp_get(ulp_ops); 2622 mutex_unlock(&cnic_lock); 2623 2624 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 2625 ulp_ops->cnic_exit(dev); 2626 2627 ulp_put(ulp_ops); 2628 } 2629} 2630 2631static int cnic_cm_offload_pg(struct cnic_sock *csk) 2632{ 2633 struct cnic_dev *dev = csk->dev; 2634 struct l4_kwq_offload_pg *l4kwqe; 2635 struct kwqe *wqes[1]; 2636 2637 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; 2638 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2639 wqes[0] = (struct kwqe *) l4kwqe; 2640 2641 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 2642 l4kwqe->flags = 2643 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 2644 l4kwqe->l2hdr_nbytes = ETH_HLEN; 2645 2646 l4kwqe->da0 = csk->ha[0]; 2647 l4kwqe->da1 = csk->ha[1]; 2648 l4kwqe->da2 = csk->ha[2]; 2649 l4kwqe->da3 = csk->ha[3]; 2650 l4kwqe->da4 = csk->ha[4]; 2651 l4kwqe->da5 = csk->ha[5]; 2652 2653 l4kwqe->sa0 = dev->mac_addr[0]; 2654 l4kwqe->sa1 = dev->mac_addr[1]; 2655 l4kwqe->sa2 = dev->mac_addr[2]; 2656 l4kwqe->sa3 = dev->mac_addr[3]; 2657 l4kwqe->sa4 = dev->mac_addr[4]; 2658 l4kwqe->sa5 = dev->mac_addr[5]; 2659 2660 l4kwqe->etype = ETH_P_IP; 2661 l4kwqe->ipid_start = DEF_IPID_START; 2662 l4kwqe->host_opaque = csk->l5_cid; 2663 2664 if (csk->vlan_id) { 2665 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 2666 l4kwqe->vlan_tag = csk->vlan_id; 2667 l4kwqe->l2hdr_nbytes += 4; 2668 } 2669 2670 return dev->submit_kwqes(dev, wqes, 1); 2671} 2672 2673static int cnic_cm_update_pg(struct cnic_sock *csk) 2674{ 2675 struct cnic_dev *dev = csk->dev; 2676 struct l4_kwq_update_pg *l4kwqe; 2677 struct kwqe *wqes[1]; 2678 2679 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 2680 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2681 wqes[0] = (struct kwqe *) l4kwqe; 2682 2683 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 2684 l4kwqe->flags = 2685 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 2686 l4kwqe->pg_cid = csk->pg_cid; 2687 2688 l4kwqe->da0 = csk->ha[0]; 2689 l4kwqe->da1 = csk->ha[1]; 2690 l4kwqe->da2 = csk->ha[2]; 2691 l4kwqe->da3 = csk->ha[3]; 2692 l4kwqe->da4 = csk->ha[4]; 2693 l4kwqe->da5 = csk->ha[5]; 2694 2695 
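/* Editor's note: the CFC-delete handshake spans two paths shown above --
 * cnic_bnx2x_destroy_ramrod() posts RAMROD_CMD_ID_COMMON_CFC_DEL and
 * blocks in wait_event(ctx->waitq, ctx->wait_cond), and the completion
 * comes back through cnic_ctl(CNIC_CTL_COMPLETION_CMD), which sets
 * wait_cond and wakes the waiter. */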
l4kwqe->pg_host_opaque = csk->l5_cid; 2696 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 2697 2698 return dev->submit_kwqes(dev, wqes, 1); 2699} 2700 2701static int cnic_cm_upload_pg(struct cnic_sock *csk) 2702{ 2703 struct cnic_dev *dev = csk->dev; 2704 struct l4_kwq_upload *l4kwqe; 2705 struct kwqe *wqes[1]; 2706 2707 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 2708 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2709 wqes[0] = (struct kwqe *) l4kwqe; 2710 2711 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 2712 l4kwqe->flags = 2713 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 2714 l4kwqe->cid = csk->pg_cid; 2715 2716 return dev->submit_kwqes(dev, wqes, 1); 2717} 2718 2719static int cnic_cm_conn_req(struct cnic_sock *csk) 2720{ 2721 struct cnic_dev *dev = csk->dev; 2722 struct l4_kwq_connect_req1 *l4kwqe1; 2723 struct l4_kwq_connect_req2 *l4kwqe2; 2724 struct l4_kwq_connect_req3 *l4kwqe3; 2725 struct kwqe *wqes[3]; 2726 u8 tcp_flags = 0; 2727 int num_wqes = 2; 2728 2729 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 2730 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 2731 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 2732 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 2733 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 2734 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 2735 2736 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 2737 l4kwqe3->flags = 2738 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 2739 l4kwqe3->ka_timeout = csk->ka_timeout; 2740 l4kwqe3->ka_interval = csk->ka_interval; 2741 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 2742 l4kwqe3->tos = csk->tos; 2743 l4kwqe3->ttl = csk->ttl; 2744 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 2745 l4kwqe3->pmtu = csk->mtu; 2746 l4kwqe3->rcv_buf = csk->rcv_buf; 2747 l4kwqe3->snd_buf = csk->snd_buf; 2748 l4kwqe3->seed = csk->seed; 2749 2750 wqes[0] = (struct kwqe *) l4kwqe1; 2751 if (test_bit(SK_F_IPV6, &csk->flags)) { 2752 wqes[1] = (struct kwqe *) l4kwqe2; 2753 wqes[2] = (struct kwqe *) l4kwqe3; 2754 num_wqes = 3; 2755 2756 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 2757 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 2758 l4kwqe2->flags = 2759 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 2760 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 2761 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 2762 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 2763 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 2764 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 2765 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 2766 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 2767 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 2768 sizeof(struct tcphdr); 2769 } else { 2770 wqes[1] = (struct kwqe *) l4kwqe3; 2771 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 2772 sizeof(struct tcphdr); 2773 } 2774 2775 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 2776 l4kwqe1->flags = 2777 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 2778 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 2779 l4kwqe1->cid = csk->cid; 2780 l4kwqe1->pg_cid = csk->pg_cid; 2781 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 2782 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 2783 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 2784 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 2785 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 2786 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 2787 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 2788 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 2789 if (csk->tcp_flags & SK_TCP_NAGLE) 2790 tcp_flags |= 
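/* Editor's worked example (illustrative numbers): cnic_cm_conn_req()
 * derives the MSS from the path MTU above -- for IPv4 with pmtu 1500,
 * mss = 1500 - 20 (iphdr) - 20 (tcphdr) = 1460; for IPv6 at the same
 * pmtu, mss = 1500 - 40 - 20 = 1440, and a second KWQE carrying the
 * upper address words is linked into the three-entry request. */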
L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 2791 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 2792 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 2793 if (csk->tcp_flags & SK_TCP_SACK) 2794 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 2795 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 2796 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 2797 2798 l4kwqe1->tcp_flags = tcp_flags; 2799 2800 return dev->submit_kwqes(dev, wqes, num_wqes); 2801} 2802 2803static int cnic_cm_close_req(struct cnic_sock *csk) 2804{ 2805 struct cnic_dev *dev = csk->dev; 2806 struct l4_kwq_close_req *l4kwqe; 2807 struct kwqe *wqes[1]; 2808 2809 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 2810 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2811 wqes[0] = (struct kwqe *) l4kwqe; 2812 2813 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 2814 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 2815 l4kwqe->cid = csk->cid; 2816 2817 return dev->submit_kwqes(dev, wqes, 1); 2818} 2819 2820static int cnic_cm_abort_req(struct cnic_sock *csk) 2821{ 2822 struct cnic_dev *dev = csk->dev; 2823 struct l4_kwq_reset_req *l4kwqe; 2824 struct kwqe *wqes[1]; 2825 2826 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 2827 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2828 wqes[0] = (struct kwqe *) l4kwqe; 2829 2830 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 2831 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 2832 l4kwqe->cid = csk->cid; 2833 2834 return dev->submit_kwqes(dev, wqes, 1); 2835} 2836 2837static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 2838 u32 l5_cid, struct cnic_sock **csk, void *context) 2839{ 2840 struct cnic_local *cp = dev->cnic_priv; 2841 struct cnic_sock *csk1; 2842 2843 if (l5_cid >= MAX_CM_SK_TBL_SZ) 2844 return -EINVAL; 2845 2846 if (cp->ctx_tbl) { 2847 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2848 2849 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2850 return -EAGAIN; 2851 } 2852 2853 csk1 = &cp->csk_tbl[l5_cid]; 2854 if (atomic_read(&csk1->ref_count)) 2855 return -EAGAIN; 2856 2857 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 2858 return -EBUSY; 2859 2860 csk1->dev = dev; 2861 csk1->cid = cid; 2862 csk1->l5_cid = l5_cid; 2863 csk1->ulp_type = ulp_type; 2864 csk1->context = context; 2865 2866 csk1->ka_timeout = DEF_KA_TIMEOUT; 2867 csk1->ka_interval = DEF_KA_INTERVAL; 2868 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 2869 csk1->tos = DEF_TOS; 2870 csk1->ttl = DEF_TTL; 2871 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 2872 csk1->rcv_buf = DEF_RCV_BUF; 2873 csk1->snd_buf = DEF_SND_BUF; 2874 csk1->seed = DEF_SEED; 2875 2876 *csk = csk1; 2877 return 0; 2878} 2879 2880static void cnic_cm_cleanup(struct cnic_sock *csk) 2881{ 2882 if (csk->src_port) { 2883 struct cnic_dev *dev = csk->dev; 2884 struct cnic_local *cp = dev->cnic_priv; 2885 2886 cnic_free_id(&cp->csk_port_tbl, csk->src_port); 2887 csk->src_port = 0; 2888 } 2889} 2890 2891static void cnic_close_conn(struct cnic_sock *csk) 2892{ 2893 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 2894 cnic_cm_upload_pg(csk); 2895 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 2896 } 2897 cnic_cm_cleanup(csk); 2898} 2899 2900static int cnic_cm_destroy(struct cnic_sock *csk) 2901{ 2902 if (!cnic_in_use(csk)) 2903 return -EINVAL; 2904 2905 csk_hold(csk); 2906 clear_bit(SK_F_INUSE, &csk->flags); 2907 smp_mb__after_clear_bit(); 2908 while (atomic_read(&csk->ref_count) != 1) 2909 msleep(1); 2910 cnic_cm_cleanup(csk); 2911 2912 csk->flags = 0; 2913 csk_put(csk); 2914 return 0; 2915} 2916 2917static inline u16 cnic_get_vlan(struct net_device 
*dev, 2918 struct net_device **vlan_dev) 2919{ 2920 if (dev->priv_flags & IFF_802_1Q_VLAN) { 2921 *vlan_dev = vlan_dev_real_dev(dev); 2922 return vlan_dev_vlan_id(dev); 2923 } 2924 *vlan_dev = dev; 2925 return 0; 2926} 2927 2928static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 2929 struct dst_entry **dst) 2930{ 2931#if defined(CONFIG_INET) 2932 struct flowi fl; 2933 int err; 2934 struct rtable *rt; 2935 2936 memset(&fl, 0, sizeof(fl)); 2937 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; 2938 2939 err = ip_route_output_key(&init_net, &rt, &fl); 2940 if (!err) 2941 *dst = &rt->dst; 2942 return err; 2943#else 2944 return -ENETUNREACH; 2945#endif 2946} 2947 2948static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 2949 struct dst_entry **dst) 2950{ 2951#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 2952 struct flowi fl; 2953 2954 memset(&fl, 0, sizeof(fl)); 2955 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); 2956 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL) 2957 fl.oif = dst_addr->sin6_scope_id; 2958 2959 *dst = ip6_route_output(&init_net, NULL, &fl); 2960 if (*dst) 2961 return 0; 2962#endif 2963 2964 return -ENETUNREACH; 2965} 2966 2967static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 2968 int ulp_type) 2969{ 2970 struct cnic_dev *dev = NULL; 2971 struct dst_entry *dst; 2972 struct net_device *netdev = NULL; 2973 int err = -ENETUNREACH; 2974 2975 if (dst_addr->sin_family == AF_INET) 2976 err = cnic_get_v4_route(dst_addr, &dst); 2977 else if (dst_addr->sin_family == AF_INET6) { 2978 struct sockaddr_in6 *dst_addr6 = 2979 (struct sockaddr_in6 *) dst_addr; 2980 2981 err = cnic_get_v6_route(dst_addr6, &dst); 2982 } else 2983 return NULL; 2984 2985 if (err) 2986 return NULL; 2987 2988 if (!dst->dev) 2989 goto done; 2990 2991 cnic_get_vlan(dst->dev, &netdev); 2992 2993 dev = cnic_from_netdev(netdev); 2994 2995done: 2996 dst_release(dst); 2997 if (dev) 2998 cnic_put(dev); 2999 return dev; 3000} 3001 3002static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3003{ 3004 struct cnic_dev *dev = csk->dev; 3005 struct cnic_local *cp = dev->cnic_priv; 3006 3007 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 3008} 3009 3010static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3011{ 3012 struct cnic_dev *dev = csk->dev; 3013 struct cnic_local *cp = dev->cnic_priv; 3014 int is_v6, rc = 0; 3015 struct dst_entry *dst = NULL; 3016 struct net_device *realdev; 3017 u32 local_port; 3018 3019 if (saddr->local.v6.sin6_family == AF_INET6 && 3020 saddr->remote.v6.sin6_family == AF_INET6) 3021 is_v6 = 1; 3022 else if (saddr->local.v4.sin_family == AF_INET && 3023 saddr->remote.v4.sin_family == AF_INET) 3024 is_v6 = 0; 3025 else 3026 return -EINVAL; 3027 3028 clear_bit(SK_F_IPV6, &csk->flags); 3029 3030 if (is_v6) { 3031 set_bit(SK_F_IPV6, &csk->flags); 3032 cnic_get_v6_route(&saddr->remote.v6, &dst); 3033 3034 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 3035 sizeof(struct in6_addr)); 3036 csk->dst_port = saddr->remote.v6.sin6_port; 3037 local_port = saddr->local.v6.sin6_port; 3038 3039 } else { 3040 cnic_get_v4_route(&saddr->remote.v4, &dst); 3041 3042 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 3043 csk->dst_port = saddr->remote.v4.sin_port; 3044 local_port = saddr->local.v4.sin_port; 3045 } 3046 3047 csk->vlan_id = 0; 3048 csk->mtu = dev->netdev->mtu; 3049 if (dst && dst->dev) { 3050 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 3051 if (realdev == dev->netdev) { 
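/* Editor's note: cnic_get_vlan() unwraps an 802.1Q device to its real
 * lower device, so the VLAN id and path MTU of the route are adopted
 * only when the traffic would actually leave through this cnic's own
 * netdev. */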
3052 csk->vlan_id = vlan; 3053 csk->mtu = dst_mtu(dst); 3054 } 3055 } 3056 3057 if (local_port >= CNIC_LOCAL_PORT_MIN && 3058 local_port < CNIC_LOCAL_PORT_MAX) { 3059 if (cnic_alloc_id(&cp->csk_port_tbl, local_port)) 3060 local_port = 0; 3061 } else 3062 local_port = 0; 3063 3064 if (!local_port) { 3065 local_port = cnic_alloc_new_id(&cp->csk_port_tbl); 3066 if (local_port == -1) { 3067 rc = -ENOMEM; 3068 goto err_out; 3069 } 3070 } 3071 csk->src_port = local_port; 3072 3073err_out: 3074 dst_release(dst); 3075 return rc; 3076} 3077 3078static void cnic_init_csk_state(struct cnic_sock *csk) 3079{ 3080 csk->state = 0; 3081 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3082 clear_bit(SK_F_CLOSING, &csk->flags); 3083} 3084 3085static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3086{ 3087 int err = 0; 3088 3089 if (!cnic_in_use(csk)) 3090 return -EINVAL; 3091 3092 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 3093 return -EINVAL; 3094 3095 cnic_init_csk_state(csk); 3096 3097 err = cnic_get_route(csk, saddr); 3098 if (err) 3099 goto err_out; 3100 3101 err = cnic_resolve_addr(csk, saddr); 3102 if (!err) 3103 return 0; 3104 3105err_out: 3106 clear_bit(SK_F_CONNECT_START, &csk->flags); 3107 return err; 3108} 3109 3110static int cnic_cm_abort(struct cnic_sock *csk) 3111{ 3112 struct cnic_local *cp = csk->dev->cnic_priv; 3113 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; 3114 3115 if (!cnic_in_use(csk)) 3116 return -EINVAL; 3117 3118 if (cnic_abort_prep(csk)) 3119 return cnic_cm_abort_req(csk); 3120 3121 /* Getting here means that we haven't started connect, or 3122 * connect was not successful. 3123 */ 3124 3125 cp->close_conn(csk, opcode); 3126 if (csk->state != opcode) 3127 return -EALREADY; 3128 3129 return 0; 3130} 3131 3132static int cnic_cm_close(struct cnic_sock *csk) 3133{ 3134 if (!cnic_in_use(csk)) 3135 return -EINVAL; 3136 3137 if (cnic_close_prep(csk)) { 3138 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3139 return cnic_cm_close_req(csk); 3140 } else { 3141 return -EALREADY; 3142 } 3143 return 0; 3144} 3145 3146static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3147 u8 opcode) 3148{ 3149 struct cnic_ulp_ops *ulp_ops; 3150 int ulp_type = csk->ulp_type; 3151 3152 rcu_read_lock(); 3153 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3154 if (ulp_ops) { 3155 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3156 ulp_ops->cm_connect_complete(csk); 3157 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3158 ulp_ops->cm_close_complete(csk); 3159 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3160 ulp_ops->cm_remote_abort(csk); 3161 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3162 ulp_ops->cm_abort_complete(csk); 3163 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3164 ulp_ops->cm_remote_close(csk); 3165 } 3166 rcu_read_unlock(); 3167} 3168 3169static int cnic_cm_set_pg(struct cnic_sock *csk) 3170{ 3171 if (cnic_offld_prep(csk)) { 3172 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3173 cnic_cm_update_pg(csk); 3174 else 3175 cnic_cm_offload_pg(csk); 3176 } 3177 return 0; 3178} 3179 3180static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3181{ 3182 struct cnic_local *cp = dev->cnic_priv; 3183 u32 l5_cid = kcqe->pg_host_opaque; 3184 u8 opcode = kcqe->op_code; 3185 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3186 3187 csk_hold(csk); 3188 if (!cnic_in_use(csk)) 3189 goto done; 3190 3191 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3192 clear_bit(SK_F_OFFLD_SCHED, 
&csk->flags); 3193 goto done; 3194 } 3195 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3196 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3197 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3198 cnic_cm_upcall(cp, csk, 3199 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3200 goto done; 3201 } 3202 3203 csk->pg_cid = kcqe->pg_cid; 3204 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3205 cnic_cm_conn_req(csk); 3206 3207done: 3208 csk_put(csk); 3209} 3210 3211static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3212{ 3213 struct cnic_local *cp = dev->cnic_priv; 3214 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3215 u8 opcode = l4kcqe->op_code; 3216 u32 l5_cid; 3217 struct cnic_sock *csk; 3218 3219 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3220 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3221 cnic_cm_process_offld_pg(dev, l4kcqe); 3222 return; 3223 } 3224 3225 l5_cid = l4kcqe->conn_id; 3226 if (opcode & 0x80) 3227 l5_cid = l4kcqe->cid; 3228 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3229 return; 3230 3231 csk = &cp->csk_tbl[l5_cid]; 3232 csk_hold(csk); 3233 3234 if (!cnic_in_use(csk)) { 3235 csk_put(csk); 3236 return; 3237 } 3238 3239 switch (opcode) { 3240 case L5CM_RAMROD_CMD_ID_TCP_CONNECT: 3241 if (l4kcqe->status != 0) { 3242 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3243 cnic_cm_upcall(cp, csk, 3244 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3245 } 3246 break; 3247 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 3248 if (l4kcqe->status == 0) 3249 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 3250 3251 smp_mb__before_clear_bit(); 3252 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3253 cnic_cm_upcall(cp, csk, opcode); 3254 break; 3255 3256 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3257 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3258 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3259 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3260 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3261 cp->close_conn(csk, opcode); 3262 break; 3263 3264 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: 3265 cnic_cm_upcall(cp, csk, opcode); 3266 break; 3267 } 3268 csk_put(csk); 3269} 3270 3271static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) 3272{ 3273 struct cnic_dev *dev = data; 3274 int i; 3275 3276 for (i = 0; i < num; i++) 3277 cnic_cm_process_kcqe(dev, kcqe[i]); 3278} 3279 3280static struct cnic_ulp_ops cm_ulp_ops = { 3281 .indicate_kcqes = cnic_cm_indicate_kcqe, 3282}; 3283 3284static void cnic_cm_free_mem(struct cnic_dev *dev) 3285{ 3286 struct cnic_local *cp = dev->cnic_priv; 3287 3288 kfree(cp->csk_tbl); 3289 cp->csk_tbl = NULL; 3290 cnic_free_id_tbl(&cp->csk_port_tbl); 3291} 3292 3293static int cnic_cm_alloc_mem(struct cnic_dev *dev) 3294{ 3295 struct cnic_local *cp = dev->cnic_priv; 3296 3297 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, 3298 GFP_KERNEL); 3299 if (!cp->csk_tbl) 3300 return -ENOMEM; 3301 3302 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 3303 CNIC_LOCAL_PORT_MIN)) { 3304 cnic_cm_free_mem(dev); 3305 return -ENOMEM; 3306 } 3307 return 0; 3308} 3309 3310static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 3311{ 3312 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 3313 /* Unsolicited RESET_COMP or RESET_RECEIVED */ 3314 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 3315 csk->state = opcode; 3316 } 3317 3318 /* 1. If event opcode matches the expected event in csk->state 3319 * 2. If the expected event is CLOSE_COMP, we accept any event 3320 * 3. 
If the expected event is 0, meaning the connection was never 3321 * established, we accept the opcode from cm_abort. 3322 */ 3323 if (opcode == csk->state || csk->state == 0 || 3324 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) { 3325 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 3326 if (csk->state == 0) 3327 csk->state = opcode; 3328 return 1; 3329 } 3330 } 3331 return 0; 3332} 3333 3334static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) 3335{ 3336 struct cnic_dev *dev = csk->dev; 3337 struct cnic_local *cp = dev->cnic_priv; 3338 3339 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { 3340 cnic_cm_upcall(cp, csk, opcode); 3341 return; 3342 } 3343 3344 clear_bit(SK_F_CONNECT_START, &csk->flags); 3345 cnic_close_conn(csk); 3346 csk->state = opcode; 3347 cnic_cm_upcall(cp, csk, opcode); 3348} 3349 3350static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) 3351{ 3352} 3353 3354static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) 3355{ 3356 u32 seed; 3357 3358 get_random_bytes(&seed, 4); 3359 cnic_ctx_wr(dev, 45, 0, seed); 3360 return 0; 3361} 3362 3363static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) 3364{ 3365 struct cnic_dev *dev = csk->dev; 3366 struct cnic_local *cp = dev->cnic_priv; 3367 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; 3368 union l5cm_specific_data l5_data; 3369 u32 cmd = 0; 3370 int close_complete = 0; 3371 3372 switch (opcode) { 3373 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3374 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3375 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3376 if (cnic_ready_to_close(csk, opcode)) { 3377 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3378 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 3379 else 3380 close_complete = 1; 3381 } 3382 break; 3383 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3384 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3385 break; 3386 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3387 close_complete = 1; 3388 break; 3389 } 3390 if (cmd) { 3391 memset(&l5_data, 0, sizeof(l5_data)); 3392 3393 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, 3394 &l5_data); 3395 } else if (close_complete) { 3396 ctx->timestamp = jiffies; 3397 cnic_close_conn(csk); 3398 cnic_cm_upcall(cp, csk, csk->state); 3399 } 3400} 3401 3402static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) 3403{ 3404 struct cnic_local *cp = dev->cnic_priv; 3405 int i; 3406 3407 if (!cp->ctx_tbl) 3408 return; 3409 3410 if (!netif_running(dev->netdev)) 3411 return; 3412 3413 for (i = 0; i < cp->max_cid_space; i++) { 3414 struct cnic_context *ctx = &cp->ctx_tbl[i]; 3415 3416 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 3417 msleep(10); 3418 3419 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 3420 netdev_warn(dev->netdev, "CID %x not deleted\n", 3421 ctx->cid); 3422 } 3423 3424 cancel_delayed_work(&cp->delete_task); 3425 flush_workqueue(cnic_wq); 3426 3427 if (atomic_read(&cp->iscsi_conn) != 0) 3428 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", 3429 atomic_read(&cp->iscsi_conn)); 3430} 3431 3432static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 3433{ 3434 struct cnic_local *cp = dev->cnic_priv; 3435 u32 pfid = cp->pfid; 3436 u32 port = CNIC_PORT(cp); 3437 3438 cnic_init_bnx2x_mac(dev); 3439 cnic_bnx2x_set_tcp_timestamp(dev, 1); 3440 3441 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 3442 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); 3443 3444 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3445 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); 3446 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3447 
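/* Editor's note on cnic_close_bnx2x_conn() above: a bnx2x teardown
 * walks a three-step ramrod chain -- the RESET/CLOSE completion posts
 * L5CM_RAMROD_CMD_ID_SEARCHER_DELETE, its completion posts
 * L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD, and only that final completion
 * (or a connection that never reached the searcher) falls through to
 * cnic_close_conn() and the upcall. */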
XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), 3448 DEF_MAX_DA_COUNT); 3449 3450 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3451 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); 3452 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3453 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); 3454 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3455 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); 3456 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3457 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); 3458 3459 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), 3460 DEF_MAX_CWND); 3461 return 0; 3462} 3463 3464static void cnic_delete_task(struct work_struct *work) 3465{ 3466 struct cnic_local *cp; 3467 struct cnic_dev *dev; 3468 u32 i; 3469 int need_resched = 0; 3470 3471 cp = container_of(work, struct cnic_local, delete_task.work); 3472 dev = cp->dev; 3473 3474 for (i = 0; i < cp->max_cid_space; i++) { 3475 struct cnic_context *ctx = &cp->ctx_tbl[i]; 3476 3477 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || 3478 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 3479 continue; 3480 3481 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 3482 need_resched = 1; 3483 continue; 3484 } 3485 3486 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 3487 continue; 3488 3489 cnic_bnx2x_destroy_ramrod(dev, i); 3490 3491 cnic_free_bnx2x_conn_resc(dev, i); 3492 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) 3493 atomic_dec(&cp->iscsi_conn); 3494 3495 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 3496 } 3497 3498 if (need_resched) 3499 queue_delayed_work(cnic_wq, &cp->delete_task, 3500 msecs_to_jiffies(10)); 3501 3502} 3503 3504static int cnic_cm_open(struct cnic_dev *dev) 3505{ 3506 struct cnic_local *cp = dev->cnic_priv; 3507 int err; 3508 3509 err = cnic_cm_alloc_mem(dev); 3510 if (err) 3511 return err; 3512 3513 err = cp->start_cm(dev); 3514 3515 if (err) 3516 goto err_out; 3517 3518 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); 3519 3520 dev->cm_create = cnic_cm_create; 3521 dev->cm_destroy = cnic_cm_destroy; 3522 dev->cm_connect = cnic_cm_connect; 3523 dev->cm_abort = cnic_cm_abort; 3524 dev->cm_close = cnic_cm_close; 3525 dev->cm_select_dev = cnic_cm_select_dev; 3526 3527 cp->ulp_handle[CNIC_ULP_L4] = dev; 3528 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 3529 return 0; 3530 3531err_out: 3532 cnic_cm_free_mem(dev); 3533 return err; 3534} 3535 3536static int cnic_cm_shutdown(struct cnic_dev *dev) 3537{ 3538 struct cnic_local *cp = dev->cnic_priv; 3539 int i; 3540 3541 cp->stop_cm(dev); 3542 3543 if (!cp->csk_tbl) 3544 return 0; 3545 3546 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 3547 struct cnic_sock *csk = &cp->csk_tbl[i]; 3548 3549 clear_bit(SK_F_INUSE, &csk->flags); 3550 cnic_cm_cleanup(csk); 3551 } 3552 cnic_cm_free_mem(dev); 3553 3554 return 0; 3555} 3556 3557static void cnic_init_context(struct cnic_dev *dev, u32 cid) 3558{ 3559 u32 cid_addr; 3560 int i; 3561 3562 cid_addr = GET_CID_ADDR(cid); 3563 3564 for (i = 0; i < CTX_SIZE; i += 4) 3565 cnic_ctx_wr(dev, cid_addr, i, 0); 3566} 3567 3568static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 3569{ 3570 struct cnic_local *cp = dev->cnic_priv; 3571 int ret = 0, i; 3572 u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 3573 3574 if (CHIP_NUM(cp) != CHIP_NUM_5709) 3575 return 0; 3576 3577 for (i = 0; i < cp->ctx_blks; i++) { 3578 int j; 3579 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 3580 u32 val; 3581 3582 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 3583 3584 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 3585 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 3586 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 3587 (u64) cp->ctx_arr[i].mapping >> 32); 3588 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 3589 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3590 for (j = 0; j < 10; j++) { 3591 3592 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 3593 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 3594 break; 3595 udelay(5); 3596 } 3597 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 3598 ret = -EBUSY; 3599 break; 3600 } 3601 } 3602 return ret; 3603} 3604 3605static void cnic_free_irq(struct cnic_dev *dev) 3606{ 3607 struct cnic_local *cp = dev->cnic_priv; 3608 struct cnic_eth_dev *ethdev = cp->ethdev; 3609 3610 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3611 cp->disable_int_sync(dev); 3612 tasklet_kill(&cp->cnic_irq_task); 3613 free_irq(ethdev->irq_arr[0].vector, dev); 3614 } 3615} 3616 3617static int cnic_request_irq(struct cnic_dev *dev) 3618{ 3619 struct cnic_local *cp = dev->cnic_priv; 3620 struct cnic_eth_dev *ethdev = cp->ethdev; 3621 int err; 3622 3623 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); 3624 if (err) 3625 tasklet_disable(&cp->cnic_irq_task); 3626 3627 return err; 3628} 3629 3630static int cnic_init_bnx2_irq(struct cnic_dev *dev) 3631{ 3632 struct cnic_local *cp = dev->cnic_priv; 3633 struct cnic_eth_dev *ethdev = cp->ethdev; 3634 3635 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3636 int err, i = 0; 3637 int sblk_num = cp->status_blk_num; 3638 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 3639 BNX2_HC_SB_CONFIG_1; 3640 3641 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 3642 3643 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 3644 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 3645 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 3646 3647 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 3648 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 3649 (unsigned long) dev); 3650 err = cnic_request_irq(dev); 3651 if (err) 3652 return err; 3653 3654 while (cp->status_blk.bnx2->status_completion_producer_index && 3655 i < 10) { 3656 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 3657 1 << (11 + sblk_num)); 3658 udelay(10); 3659 i++; 3660 barrier(); 3661 } 3662 if (cp->status_blk.bnx2->status_completion_producer_index) { 3663 cnic_free_irq(dev); 3664 goto failed; 3665 } 3666 3667 } else { 3668 struct status_block *sblk = cp->status_blk.gen; 3669 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 3670 int i = 0; 3671 3672 while (sblk->status_completion_producer_index && i < 10) { 3673 CNIC_WR(dev, BNX2_HC_COMMAND, 3674 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 3675 udelay(10); 3676 i++; 3677 barrier(); 3678 } 3679 if (sblk->status_completion_producer_index) 3680 goto failed; 3681 3682 } 3683 return 0; 3684 3685failed: 3686 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 3687 return -EBUSY; 3688} 3689 3690static void cnic_enable_bnx2_int(struct cnic_dev *dev) 3691{ 3692 struct cnic_local *cp = dev->cnic_priv; 3693 struct cnic_eth_dev *ethdev = cp->ethdev; 3694 3695 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3696 return; 3697 3698 CNIC_WR(dev, 
BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 3699 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 3700} 3701 3702static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 3703{ 3704 struct cnic_local *cp = dev->cnic_priv; 3705 struct cnic_eth_dev *ethdev = cp->ethdev; 3706 3707 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3708 return; 3709 3710 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 3711 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 3712 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 3713 synchronize_irq(ethdev->irq_arr[0].vector); 3714} 3715 3716static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 3717{ 3718 struct cnic_local *cp = dev->cnic_priv; 3719 struct cnic_eth_dev *ethdev = cp->ethdev; 3720 struct cnic_uio_dev *udev = cp->udev; 3721 u32 cid_addr, tx_cid, sb_id; 3722 u32 val, offset0, offset1, offset2, offset3; 3723 int i; 3724 struct tx_bd *txbd; 3725 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 3726 struct status_block *s_blk = cp->status_blk.gen; 3727 3728 sb_id = cp->status_blk_num; 3729 tx_cid = 20; 3730 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 3731 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3732 struct status_block_msix *sblk = cp->status_blk.bnx2; 3733 3734 tx_cid = TX_TSS_CID + sb_id - 1; 3735 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 3736 (TX_TSS_CID << 7)); 3737 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 3738 } 3739 cp->tx_cons = *cp->tx_cons_ptr; 3740 3741 cid_addr = GET_CID_ADDR(tx_cid); 3742 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 3743 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 3744 3745 for (i = 0; i < PHY_CTX_SIZE; i += 4) 3746 cnic_ctx_wr(dev, cid_addr2, i, 0); 3747 3748 offset0 = BNX2_L2CTX_TYPE_XI; 3749 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 3750 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 3751 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 3752 } else { 3753 cnic_init_context(dev, tx_cid); 3754 cnic_init_context(dev, tx_cid + 1); 3755 3756 offset0 = BNX2_L2CTX_TYPE; 3757 offset1 = BNX2_L2CTX_CMD_TYPE; 3758 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 3759 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 3760 } 3761 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 3762 cnic_ctx_wr(dev, cid_addr, offset0, val); 3763 3764 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3765 cnic_ctx_wr(dev, cid_addr, offset1, val); 3766 3767 txbd = (struct tx_bd *) udev->l2_ring; 3768 3769 buf_map = udev->l2_buf_map; 3770 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 3771 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 3772 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3773 } 3774 val = (u64) ring_map >> 32; 3775 cnic_ctx_wr(dev, cid_addr, offset2, val); 3776 txbd->tx_bd_haddr_hi = val; 3777 3778 val = (u64) ring_map & 0xffffffff; 3779 cnic_ctx_wr(dev, cid_addr, offset3, val); 3780 txbd->tx_bd_haddr_lo = val; 3781} 3782 3783static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 3784{ 3785 struct cnic_local *cp = dev->cnic_priv; 3786 struct cnic_eth_dev *ethdev = cp->ethdev; 3787 struct cnic_uio_dev *udev = cp->udev; 3788 u32 cid_addr, sb_id, val, coal_reg, coal_val; 3789 int i; 3790 struct rx_bd *rxbd; 3791 struct status_block *s_blk = cp->status_blk.gen; 3792 dma_addr_t ring_map = udev->l2_ring_map; 3793 3794 sb_id = cp->status_blk_num; 3795 cnic_init_context(dev, 2); 3796 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 3797 coal_reg = BNX2_HC_COMMAND; 3798 coal_val = CNIC_RD(dev, coal_reg); 3799 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3800 struct status_block_msix *sblk = cp->status_blk.bnx2; 3801 3802 
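/* Editor's note: here and in cnic_init_bnx2_tx_ring() above, the BD
 * loop stops one descriptor short of the page (assuming the MAX_*_DESC_CNT
 * constants leave the final slot free), and that last slot is then
 * pointed back at the ring's base address, acting as the chain
 * (next-page) pointer that turns the page into a circular ring. */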
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 3803 coal_reg = BNX2_HC_COALESCE_NOW; 3804 coal_val = 1 << (11 + sb_id); 3805 } 3806 i = 0; 3807 while (*cp->rx_cons_ptr == 0 && i < 10) { 3808 CNIC_WR(dev, coal_reg, coal_val); 3809 udelay(10); 3810 i++; 3811 barrier(); 3812 } 3813 cp->rx_cons = *cp->rx_cons_ptr; 3814 3815 cid_addr = GET_CID_ADDR(2); 3816 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 3817 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 3818 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 3819 3820 if (sb_id == 0) 3821 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 3822 else 3823 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 3824 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 3825 3826 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE); 3827 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 3828 dma_addr_t buf_map; 3829 int n = (i % cp->l2_rx_ring_size) + 1; 3830 3831 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 3832 rxbd->rx_bd_len = cp->l2_single_buf_size; 3833 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 3834 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 3835 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3836 } 3837 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 3838 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 3839 rxbd->rx_bd_haddr_hi = val; 3840 3841 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 3842 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 3843 rxbd->rx_bd_haddr_lo = val; 3844 3845 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 3846 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 3847} 3848 3849static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 3850{ 3851 struct kwqe *wqes[1], l2kwqe; 3852 3853 memset(&l2kwqe, 0, sizeof(l2kwqe)); 3854 wqes[0] = &l2kwqe; 3855 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) | 3856 (L2_KWQE_OPCODE_VALUE_FLUSH << 3857 KWQE_OPCODE_SHIFT) | 2; 3858 dev->submit_kwqes(dev, wqes, 1); 3859} 3860 3861static void cnic_set_bnx2_mac(struct cnic_dev *dev) 3862{ 3863 struct cnic_local *cp = dev->cnic_priv; 3864 u32 val; 3865 3866 val = cp->func << 2; 3867 3868 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 3869 3870 val = cnic_reg_rd_ind(dev, cp->shmem_base + 3871 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 3872 dev->mac_addr[0] = (u8) (val >> 8); 3873 dev->mac_addr[1] = (u8) val; 3874 3875 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 3876 3877 val = cnic_reg_rd_ind(dev, cp->shmem_base + 3878 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 3879 dev->mac_addr[2] = (u8) (val >> 24); 3880 dev->mac_addr[3] = (u8) (val >> 16); 3881 dev->mac_addr[4] = (u8) (val >> 8); 3882 dev->mac_addr[5] = (u8) val; 3883 3884 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 3885 3886 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 3887 if (CHIP_NUM(cp) != CHIP_NUM_5709) 3888 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 3889 3890 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 3891 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 3892 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 3893} 3894 3895static int cnic_start_bnx2_hw(struct cnic_dev *dev) 3896{ 3897 struct cnic_local *cp = dev->cnic_priv; 3898 struct cnic_eth_dev *ethdev = cp->ethdev; 3899 struct status_block *sblk = cp->status_blk.gen; 3900 u32 val, kcq_cid_addr, kwq_cid_addr; 3901 int err; 3902 3903 cnic_set_bnx2_mac(dev); 3904 3905 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 3906 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 3907 if (BCM_PAGE_BITS > 12) 3908 val |= (12 - 8) << 4; 3909 else 3910 val |= (BCM_PAGE_BITS - 
8) << 4; 3911 3912 CNIC_WR(dev, BNX2_MQ_CONFIG, val); 3913 3914 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); 3915 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); 3916 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); 3917 3918 err = cnic_setup_5709_context(dev, 1); 3919 if (err) 3920 return err; 3921 3922 cnic_init_context(dev, KWQ_CID); 3923 cnic_init_context(dev, KCQ_CID); 3924 3925 kwq_cid_addr = GET_CID_ADDR(KWQ_CID); 3926 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; 3927 3928 cp->max_kwq_idx = MAX_KWQ_IDX; 3929 cp->kwq_prod_idx = 0; 3930 cp->kwq_con_idx = 0; 3931 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); 3932 3933 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) 3934 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; 3935 else 3936 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; 3937 3938 /* Initialize the kernel work queue context. */ 3939 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3940 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3941 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); 3942 3943 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 3944 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3945 3946 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 3947 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3948 3949 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 3950 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3951 3952 val = (u32) cp->kwq_info.pgtbl_map; 3953 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3954 3955 kcq_cid_addr = GET_CID_ADDR(KCQ_CID); 3956 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; 3957 3958 cp->kcq1.sw_prod_idx = 0; 3959 cp->kcq1.hw_prod_idx_ptr = 3960 (u16 *) &sblk->status_completion_producer_index; 3961 3962 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; 3963 3964 /* Initialize the kernel complete queue context. */ 3965 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3966 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3967 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); 3968 3969 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 3970 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3971 3972 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 3973 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3974 3975 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); 3976 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3977 3978 val = (u32) cp->kcq1.dma.pgtbl_map; 3979 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3980 3981 cp->int_num = 0; 3982 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3983 struct status_block_msix *msblk = cp->status_blk.bnx2; 3984 u32 sb_id = cp->status_blk_num; 3985 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 3986 3987 cp->kcq1.hw_prod_idx_ptr = 3988 (u16 *) &msblk->status_completion_producer_index; 3989 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; 3990 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; 3991 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 3992 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3993 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3994 } 3995 3996 /* Enable Command Scheduler notification when we write to the 3997 * host producer index of the kernel contexts. 
	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			(u16 *) &msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data) * sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data) * HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 12);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
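/* Descriptive comment added: set up the iSCSI L2 transmit ring that is
 * exported through UIO.  The BDs are pre-populated in groups of three,
 * all pointing into the shared udev->l2_buf area, and the ramrod data
 * is filled in with the ring's base address and status-block binding.
 */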
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}
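/* Descriptive comment added: set up the matching iSCSI L2 receive and
 * completion rings.  The RX BDs are pointed at the shared buffer area,
 * the next-page entries of both rings link back to their own pages,
 * and the client parameters (queue-zone id, MTU, status-block indices)
 * are filled into the ramrod data.
 */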
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	/* 14 = Ethernet header length */
	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
}

static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, base2, addr, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	if (base == 0)
		return;

	base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
			      MISC_REG_GENERIC_CR_0));
	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_upper);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_lower);

	val = CNIC_RD(dev, addr);

	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);

		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
		int func = CNIC_FUNC(cp);
		u32 mf_cfg_addr;

		if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
			mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
					      mf_cfg_addr));
		else
			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;

		addr = mf_cfg_addr +
			offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);

		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			addr = mf_cfg_addr +
				offsetof(struct mf_cfg,
					 func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
}
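/* Comment added for clarity: on 57712 (E2) parts the PF id depends on
 * the port mode, as derived below.  In 4-port mode two functions share
 * a port (pfid = func >> 1); in 2-port mode the even function id is
 * used (pfid = func & 0x6).  Older chips use the function number
 * directly.
 */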
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret, i;
	u32 pfid;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val)
			cp->pfid = func >> 1;
		else
			cp->pfid = func & 0x6;
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid);

	if (ret)
		return -ENOMEM;

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}
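/* Comment added for clarity: ring bring-up and tear-down on bnx2x are
 * asynchronous.  A ramrod is posted on the slow path and its completion
 * clears CNIC_LCL_FL_L2_WAIT from the service routine; the code below
 * polls that bit for up to roughly 10 ms before giving up with an
 * error message.
 */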
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		u32 cl_qzone_id, type;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			BNX2X_ISCSI_L2_CID, type, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
	}
}

static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;
		int i;
		u32 type;

		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
			 SPE_HDR_FUNCTION_ID);
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			BNX2X_ISCSI_L2_CID, type, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}
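/* Comment added for clarity: cnic_register_netdev() and
 * cnic_unregister_netdev() attach this driver to the underlying
 * ethernet driver through the drv_register_cnic / drv_unregister_cnic
 * callbacks that bnx2 and bnx2x export in struct cnic_eth_dev.
 */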
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}
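/* Comment added for clarity: tear-down order matters here.  Wait for
 * any UIO consumer to close, shut the rings down, clear CNIC_F_CNIC_UP,
 * then drop the L4 ulp_ops pointer and synchronize_rcu() so that no
 * reader can still be using it when the connection manager and the
 * hardware are shut down.
 */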
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev,
			   "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}
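/* Comment added for clarity: same pattern as init_bnx2_cnic() above,
 * but for bnx2x devices.  There is no chip-revision check here; the
 * early 5709 A0/A1 parts excluded above are bnx2 devices.
 */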
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}
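/* Comment added for clarity: a net_device is recognized as a
 * CNIC-capable device purely by its ethtool driver name ("bnx2" or
 * "bnx2x"); matching devices are added to cnic_dev_list under
 * cnic_dev_lock.
 */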
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);
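/*
 * Usage sketch (illustrative only, not part of this driver): an
 * upper-layer protocol driver such as bnx2i attaches to cnic through
 * the exported cnic_register_driver() interface declared in cnic_if.h,
 * along the lines of:
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		// fill in the callbacks the ULP implements, e.g.
 *		// .cnic_init / .cnic_exit / .indicate_kcqes
 *	};
 *
 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *
 * The netdev notifier above then invokes the registered callbacks as
 * bnx2/bnx2x devices come up and go down.  Consult cnic_if.h for the
 * authoritative callback names and signatures.
 */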