/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner = THIS_MODULE,
	.cnic_handler = cnic_service_bnx2,
	.cnic_ctl = cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner = THIS_MODULE,
	.cnic_handler = cnic_service_bnx2x,
	.cnic_ctl = cnic_ctl,
};

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	cp->uio_dev = iminor(inode);

	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_rings(dev);

	cp->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}
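
/*
 * Reference counting for cnic sockets, mirroring cnic_hold()/cnic_put()
 * above; callers pin a cnic_sock before using it outside the table locks.
 */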
static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
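
/*
 * Send a netlink event to the iSCSI ULP via its iscsi_nl_send_msg()
 * handler.  With a NULL csk this reports ISCSI_KEVENT_IF_DOWN; otherwise
 * the connection's destination IP, VLAN and MTU are packed into a
 * struct iscsi_path PATH_REQ for userspace to resolve.
 */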
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (cp->cnic_uinfo)
			cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}
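
/*
 * Called by ULP drivers (e.g. bnx2i for iSCSI) to register their ops for
 * a ULP type.  Every cnic device on the global list is then initialized
 * for that ULP, under rtnl_lock to avoid racing with netdev events.
 */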
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
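
/*
 * Per-device counterpart of cnic_register_driver(): bind an already
 * registered ULP type to one cnic device and start it immediately if
 * the device is already up.
 */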
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
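
/*
 * The chip's DMA page tables store each 64-bit page address as two
 * 32-bit words: bnx2 expects big endian order (high word first), bnx2x
 * little endian (low word first), hence the two variants below selected
 * through cp->setup_pgtbl.
 */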
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = dma->pg_map_arr[i] & 0xffffffff;
		page_table++;
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size,
				  cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size,
				  cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}
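
/*
 * On 5709 (bnx2) chips the CNIC context memory lives in host DMA memory:
 * read the PG and iSCSI CID ranges from the BNX2_PG_CTX_MAP and
 * BNX2_ISCSI_CTX_MAP registers, then allocate one coherent page per
 * block of contexts.
 */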
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->l2_ring_size = pages * BCM_PAGE_SIZE;
	cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size,
					 &cp->l2_ring_map,
					 GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_ring)
		return -ENOMEM;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size,
					&cp->l2_buf_map,
					GFP_KERNEL | __GFP_COMP);
	if (!cp->l2_buf)
		return -ENOMEM;

	return 0;
}

static int cnic_alloc_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		return -ENOMEM;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
				     PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
				     PAGE_MASK;
		uinfo->mem[1].size = sizeof(struct host_def_status_block);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		return ret;
	}

	cp->cnic_uinfo = uinfo;
	return 0;
}
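
/*
 * Allocate all per-device resources for a bnx2 device: the kernel work
 * queue (KWQ) and completion queue (KCQ), context memory, two pages of
 * L2 rings, and the UIO device that exposes the rings to userspace.
 */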
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_l2_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i, cid_space;

	if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
		return -EINVAL;

	cid_space = MAX_ISCSI_TBL_SZ +
		    (BNX2X_ISCSI_START_CID - ethdev->starting_cid);

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (BNX2X_CHIP_IS_E1H(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *)
			&cp->kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
		next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;
	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_l2_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
	       ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cid, cp->func)));
	kwqe.hdr.type = cpu_to_le16(type);
	kwqe.hdr.reserved = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
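
/*
 * iSCSI INIT1 KWQE handler: record the per-connection task, R2TQ and HQ
 * sizing, then program the corresponding Tstorm/Ustorm/Xstorm/Cstorm
 * RAM areas of the bnx2x firmware.
 */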
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int func = cp->func, pages;
	int hq_bds;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);

	return 0;
}
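
/*
 * iSCSI INIT2 KWQE handler: program the error bitmaps and CQ sequence
 * number sizes, then complete back to the ULP with an INIT KCQE.
 */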
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	int func = cp->func;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
	}
	cnic_free_id(&cp->cid_tbl, ctx->cid);
	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
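
/*
 * Build the hardware iSCSI context (struct iscsi_context) for one
 * offloaded connection from the chain of OFFLOAD_CONN1/2/3 KWQEs.
 */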
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;

	ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid,
				       CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
						  cp->func);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
				  req->context_id, ISCSI_CONNECTION_TYPE,
				  &l5_data);
	return ret;
}
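
/*
 * Tear down an offloaded iSCSI connection: issue a common CFC delete
 * ramrod and wait for its completion, then free the per-connection
 * resources and report a DESTROY_CONN KCQE to the ULP.
 */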
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	union l5cm_specific_data l5_data;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
		goto skip_cfc_delete;

	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(250);

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
				  req->context_id,
				  ETH_CONNECTION_TYPE |
				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
				  &l5_data);
	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
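
/*
 * Program the port's MAC address into the Xstorm and Tstorm RAM areas;
 * note the Tstorm copy is written in reverse byte order.
 */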
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
1915 1916 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 1917 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 1918 if (!ret) 1919 ctx->ctx_flags |= CTX_FL_OFFLD_START; 1920 1921 return ret; 1922} 1923 1924static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 1925{ 1926 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 1927 union l5cm_specific_data l5_data; 1928 int ret; 1929 1930 memset(&l5_data, 0, sizeof(l5_data)); 1931 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 1932 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 1933 return ret; 1934} 1935 1936static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 1937{ 1938 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 1939 union l5cm_specific_data l5_data; 1940 int ret; 1941 1942 memset(&l5_data, 0, sizeof(l5_data)); 1943 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, 1944 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 1945 return ret; 1946} 1947static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 1948{ 1949 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 1950 struct l4_kcq kcqe; 1951 struct kcqe *cqes[1]; 1952 1953 memset(&kcqe, 0, sizeof(kcqe)); 1954 kcqe.pg_host_opaque = req->host_opaque; 1955 kcqe.pg_cid = req->host_opaque; 1956 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 1957 cqes[0] = (struct kcqe *) &kcqe; 1958 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 1959 return 0; 1960} 1961 1962static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 1963{ 1964 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; 1965 struct l4_kcq kcqe; 1966 struct kcqe *cqes[1]; 1967 1968 memset(&kcqe, 0, sizeof(kcqe)); 1969 kcqe.pg_host_opaque = req->pg_host_opaque; 1970 kcqe.pg_cid = req->pg_cid; 1971 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 1972 cqes[0] = (struct kcqe *) &kcqe; 1973 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 1974 return 0; 1975} 1976 1977static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 1978 u32 num_wqes) 1979{ 1980 int i, work, ret; 1981 u32 opcode; 1982 struct kwqe *kwqe; 1983 1984 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 1985 return -EAGAIN; /* bnx2x is down */ 1986 1987 for (i = 0; i < num_wqes; ) { 1988 kwqe = wqes[i]; 1989 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 1990 work = 1; 1991 1992 switch (opcode) { 1993 case ISCSI_KWQE_OPCODE_INIT1: 1994 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 1995 break; 1996 case ISCSI_KWQE_OPCODE_INIT2: 1997 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 1998 break; 1999 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2000 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2001 num_wqes - i, &work); 2002 break; 2003 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2004 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2005 break; 2006 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2007 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2008 break; 2009 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2010 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2011 &work); 2012 break; 2013 case L4_KWQE_OPCODE_VALUE_CLOSE: 2014 ret = cnic_bnx2x_close(dev, kwqe); 2015 break; 2016 case L4_KWQE_OPCODE_VALUE_RESET: 2017 ret = cnic_bnx2x_reset(dev, kwqe); 2018 break; 2019 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2020 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2021 break; 2022 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2023 ret = cnic_bnx2x_update_pg(dev, kwqe); 2024 break; 2025 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2026 ret = 0; 2027 break; 2028 default: 2029 ret = 0; 2030 netdev_err(dev->netdev,
"Unknown type of KWQE(0x%x)\n", 2031 opcode); 2032 break; 2033 } 2034 if (ret < 0) 2035 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2036 opcode); 2037 i += work; 2038 } 2039 return 0; 2040} 2041 2042static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2043{ 2044 struct cnic_local *cp = dev->cnic_priv; 2045 int i, j; 2046 2047 i = 0; 2048 j = 1; 2049 while (num_cqes) { 2050 struct cnic_ulp_ops *ulp_ops; 2051 int ulp_type; 2052 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2053 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK; 2054 2055 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2056 cnic_kwq_completion(dev, 1); 2057 2058 while (j < num_cqes) { 2059 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2060 2061 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer) 2062 break; 2063 2064 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2065 cnic_kwq_completion(dev, 1); 2066 j++; 2067 } 2068 2069 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2070 ulp_type = CNIC_ULP_RDMA; 2071 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2072 ulp_type = CNIC_ULP_ISCSI; 2073 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2074 ulp_type = CNIC_ULP_L4; 2075 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2076 goto end; 2077 else { 2078 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2079 kcqe_op_flag); 2080 goto end; 2081 } 2082 2083 rcu_read_lock(); 2084 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2085 if (likely(ulp_ops)) { 2086 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2087 cp->completed_kcq + i, j); 2088 } 2089 rcu_read_unlock(); 2090end: 2091 num_cqes -= j; 2092 i += j; 2093 j = 1; 2094 } 2095} 2096 2097static u16 cnic_bnx2_next_idx(u16 idx) 2098{ 2099 return idx + 1; 2100} 2101 2102static u16 cnic_bnx2_hw_idx(u16 idx) 2103{ 2104 return idx; 2105} 2106 2107static u16 cnic_bnx2x_next_idx(u16 idx) 2108{ 2109 idx++; 2110 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2111 idx++; 2112 2113 return idx; 2114} 2115 2116static u16 cnic_bnx2x_hw_idx(u16 idx) 2117{ 2118 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2119 idx++; 2120 return idx; 2121} 2122 2123static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) 2124{ 2125 struct cnic_local *cp = dev->cnic_priv; 2126 u16 i, ri, last; 2127 struct kcqe *kcqe; 2128 int kcqe_cnt = 0, last_cnt = 0; 2129 2130 i = ri = last = *sw_prod; 2131 ri &= MAX_KCQ_IDX; 2132 2133 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2134 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2135 cp->completed_kcq[kcqe_cnt++] = kcqe; 2136 i = cp->next_idx(i); 2137 ri = i & MAX_KCQ_IDX; 2138 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2139 last_cnt = kcqe_cnt; 2140 last = i; 2141 } 2142 } 2143 2144 *sw_prod = last; 2145 return last_cnt; 2146} 2147 2148static int cnic_l2_completion(struct cnic_local *cp) 2149{ 2150 u16 hw_cons, sw_cons; 2151 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2152 (cp->l2_ring + (2 * BCM_PAGE_SIZE)); 2153 u32 cmd; 2154 int comp = 0; 2155 2156 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2157 return 0; 2158 2159 hw_cons = *cp->rx_cons_ptr; 2160 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2161 hw_cons++; 2162 2163 sw_cons = cp->rx_cons; 2164 while (sw_cons != hw_cons) { 2165 u8 cqe_fp_flags; 2166 2167 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2168 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2169 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2170 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2171 cmd >>= 
COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2172 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2173 cmd == RAMROD_CMD_ID_ETH_HALT) 2174 comp++; 2175 } 2176 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2177 } 2178 return comp; 2179} 2180 2181static void cnic_chk_pkt_rings(struct cnic_local *cp) 2182{ 2183 u16 rx_cons = *cp->rx_cons_ptr; 2184 u16 tx_cons = *cp->tx_cons_ptr; 2185 int comp = 0; 2186 2187 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2188 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2189 comp = cnic_l2_completion(cp); 2190 2191 cp->tx_cons = tx_cons; 2192 cp->rx_cons = rx_cons; 2193 2194 uio_event_notify(cp->cnic_uinfo); 2195 } 2196 if (comp) 2197 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2198} 2199 2200static int cnic_service_bnx2(void *data, void *status_blk) 2201{ 2202 struct cnic_dev *dev = data; 2203 struct status_block *sblk = status_blk; 2204 struct cnic_local *cp = dev->cnic_priv; 2205 u32 status_idx = sblk->status_idx; 2206 u16 hw_prod, sw_prod; 2207 int kcqe_cnt; 2208 2209 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2210 return status_idx; 2211 2212 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2213 2214 hw_prod = sblk->status_completion_producer_index; 2215 sw_prod = cp->kcq_prod_idx; 2216 while (sw_prod != hw_prod) { 2217 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); 2218 if (kcqe_cnt == 0) 2219 goto done; 2220 2221 service_kcqes(dev, kcqe_cnt); 2222 2223 /* Tell compiler that status_blk fields can change. */ 2224 barrier(); 2225 if (status_idx != sblk->status_idx) { 2226 status_idx = sblk->status_idx; 2227 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2228 hw_prod = sblk->status_completion_producer_index; 2229 } else 2230 break; 2231 } 2232 2233done: 2234 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); 2235 2236 cp->kcq_prod_idx = sw_prod; 2237 2238 cnic_chk_pkt_rings(cp); 2239 return status_idx; 2240} 2241 2242static void cnic_service_bnx2_msix(unsigned long data) 2243{ 2244 struct cnic_dev *dev = (struct cnic_dev *) data; 2245 struct cnic_local *cp = dev->cnic_priv; 2246 struct status_block_msix *status_blk = cp->status_blk.bnx2; 2247 u32 status_idx = status_blk->status_idx; 2248 u16 hw_prod, sw_prod; 2249 int kcqe_cnt; 2250 2251 cp->kwq_con_idx = status_blk->status_cmd_consumer_index; 2252 2253 hw_prod = status_blk->status_completion_producer_index; 2254 sw_prod = cp->kcq_prod_idx; 2255 while (sw_prod != hw_prod) { 2256 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); 2257 if (kcqe_cnt == 0) 2258 goto done; 2259 2260 service_kcqes(dev, kcqe_cnt); 2261 2262 /* Tell compiler that status_blk fields can change. 
*/ 2263 barrier(); 2264 if (status_idx != status_blk->status_idx) { 2265 status_idx = status_blk->status_idx; 2266 cp->kwq_con_idx = status_blk->status_cmd_consumer_index; 2267 hw_prod = status_blk->status_completion_producer_index; 2268 } else 2269 break; 2270 } 2271 2272done: 2273 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); 2274 cp->kcq_prod_idx = sw_prod; 2275 2276 cnic_chk_pkt_rings(cp); 2277 2278 cp->last_status_idx = status_idx; 2279 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2280 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2281} 2282 2283static irqreturn_t cnic_irq(int irq, void *dev_instance) 2284{ 2285 struct cnic_dev *dev = dev_instance; 2286 struct cnic_local *cp = dev->cnic_priv; 2287 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2288 2289 if (cp->ack_int) 2290 cp->ack_int(dev); 2291 2292 prefetch(cp->status_blk.gen); 2293 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2294 2295 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2296 tasklet_schedule(&cp->cnic_irq_task); 2297 2298 return IRQ_HANDLED; 2299} 2300 2301static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 2302 u16 index, u8 op, u8 update) 2303{ 2304 struct cnic_local *cp = dev->cnic_priv; 2305 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 2306 COMMAND_REG_INT_ACK); 2307 struct igu_ack_register igu_ack; 2308 2309 igu_ack.status_block_index = index; 2310 igu_ack.sb_id_and_flags = 2311 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 2312 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 2313 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 2314 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 2315 2316 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 2317} 2318 2319static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 2320{ 2321 struct cnic_local *cp = dev->cnic_priv; 2322 2323 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, 2324 IGU_INT_DISABLE, 0); 2325} 2326 2327static void cnic_service_bnx2x_bh(unsigned long data) 2328{ 2329 struct cnic_dev *dev = (struct cnic_dev *) data; 2330 struct cnic_local *cp = dev->cnic_priv; 2331 u16 hw_prod, sw_prod; 2332 struct cstorm_status_block_c *sblk = 2333 &cp->status_blk.bnx2x->c_status_block; 2334 u32 status_idx = sblk->status_block_index; 2335 int kcqe_cnt; 2336 2337 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2338 return; 2339 2340 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; 2341 hw_prod = cp->hw_idx(hw_prod); 2342 sw_prod = cp->kcq_prod_idx; 2343 while (sw_prod != hw_prod) { 2344 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod); 2345 if (kcqe_cnt == 0) 2346 goto done; 2347 2348 service_kcqes(dev, kcqe_cnt); 2349 2350 /* Tell compiler that sblk fields can change. 
*/ 2351 barrier(); 2352 if (status_idx == sblk->status_block_index) 2353 break; 2354 2355 status_idx = sblk->status_block_index; 2356 hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS]; 2357 hw_prod = cp->hw_idx(hw_prod); 2358 } 2359 2360done: 2361 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX); 2362 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 2363 status_idx, IGU_INT_ENABLE, 1); 2364 2365 cp->kcq_prod_idx = sw_prod; 2366} 2367 2368static int cnic_service_bnx2x(void *data, void *status_blk) 2369{ 2370 struct cnic_dev *dev = data; 2371 struct cnic_local *cp = dev->cnic_priv; 2372 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2373 2374 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2375 prefetch(cp->status_blk.bnx2x); 2376 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2377 2378 tasklet_schedule(&cp->cnic_irq_task); 2379 cnic_chk_pkt_rings(cp); 2380 } 2381 2382 return 0; 2383} 2384 2385static void cnic_ulp_stop(struct cnic_dev *dev) 2386{ 2387 struct cnic_local *cp = dev->cnic_priv; 2388 int if_type; 2389 2390 if (cp->cnic_uinfo) 2391 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 2392 2393 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2394 struct cnic_ulp_ops *ulp_ops; 2395 2396 mutex_lock(&cnic_lock); 2397 ulp_ops = cp->ulp_ops[if_type]; 2398 if (!ulp_ops) { 2399 mutex_unlock(&cnic_lock); 2400 continue; 2401 } 2402 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2403 mutex_unlock(&cnic_lock); 2404 2405 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2406 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 2407 2408 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2409 } 2410} 2411 2412static void cnic_ulp_start(struct cnic_dev *dev) 2413{ 2414 struct cnic_local *cp = dev->cnic_priv; 2415 int if_type; 2416 2417 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2418 struct cnic_ulp_ops *ulp_ops; 2419 2420 mutex_lock(&cnic_lock); 2421 ulp_ops = cp->ulp_ops[if_type]; 2422 if (!ulp_ops || !ulp_ops->cnic_start) { 2423 mutex_unlock(&cnic_lock); 2424 continue; 2425 } 2426 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2427 mutex_unlock(&cnic_lock); 2428 2429 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2430 ulp_ops->cnic_start(cp->ulp_handle[if_type]); 2431 2432 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2433 } 2434} 2435 2436static int cnic_ctl(void *data, struct cnic_ctl_info *info) 2437{ 2438 struct cnic_dev *dev = data; 2439 2440 switch (info->cmd) { 2441 case CNIC_CTL_STOP_CMD: 2442 cnic_hold(dev); 2443 2444 cnic_ulp_stop(dev); 2445 cnic_stop_hw(dev); 2446 2447 cnic_put(dev); 2448 break; 2449 case CNIC_CTL_START_CMD: 2450 cnic_hold(dev); 2451 2452 if (!cnic_start_hw(dev)) 2453 cnic_ulp_start(dev); 2454 2455 cnic_put(dev); 2456 break; 2457 case CNIC_CTL_COMPLETION_CMD: { 2458 u32 cid = BNX2X_SW_CID(info->data.comp.cid); 2459 u32 l5_cid; 2460 struct cnic_local *cp = dev->cnic_priv; 2461 2462 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 2463 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2464 2465 ctx->wait_cond = 1; 2466 wake_up(&ctx->waitq); 2467 } 2468 break; 2469 } 2470 default: 2471 return -EINVAL; 2472 } 2473 return 0; 2474} 2475 2476static void cnic_ulp_init(struct cnic_dev *dev) 2477{ 2478 int i; 2479 struct cnic_local *cp = dev->cnic_priv; 2480 2481 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 2482 struct cnic_ulp_ops *ulp_ops; 2483 2484 mutex_lock(&cnic_lock); 2485 ulp_ops = cnic_ulp_tbl[i]; 2486 if (!ulp_ops || !ulp_ops->cnic_init) { 2487 mutex_unlock(&cnic_lock); 2488 
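/* Either no ops are registered for this ULP type or it has no cnic_init callback; skip it. */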
continue; 2489 } 2490 ulp_get(ulp_ops); 2491 mutex_unlock(&cnic_lock); 2492 2493 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 2494 ulp_ops->cnic_init(dev); 2495 2496 ulp_put(ulp_ops); 2497 } 2498} 2499 2500static void cnic_ulp_exit(struct cnic_dev *dev) 2501{ 2502 int i; 2503 struct cnic_local *cp = dev->cnic_priv; 2504 2505 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 2506 struct cnic_ulp_ops *ulp_ops; 2507 2508 mutex_lock(&cnic_lock); 2509 ulp_ops = cnic_ulp_tbl[i]; 2510 if (!ulp_ops || !ulp_ops->cnic_exit) { 2511 mutex_unlock(&cnic_lock); 2512 continue; 2513 } 2514 ulp_get(ulp_ops); 2515 mutex_unlock(&cnic_lock); 2516 2517 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 2518 ulp_ops->cnic_exit(dev); 2519 2520 ulp_put(ulp_ops); 2521 } 2522} 2523 2524static int cnic_cm_offload_pg(struct cnic_sock *csk) 2525{ 2526 struct cnic_dev *dev = csk->dev; 2527 struct l4_kwq_offload_pg *l4kwqe; 2528 struct kwqe *wqes[1]; 2529 2530 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; 2531 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2532 wqes[0] = (struct kwqe *) l4kwqe; 2533 2534 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 2535 l4kwqe->flags = 2536 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 2537 l4kwqe->l2hdr_nbytes = ETH_HLEN; 2538 2539 l4kwqe->da0 = csk->ha[0]; 2540 l4kwqe->da1 = csk->ha[1]; 2541 l4kwqe->da2 = csk->ha[2]; 2542 l4kwqe->da3 = csk->ha[3]; 2543 l4kwqe->da4 = csk->ha[4]; 2544 l4kwqe->da5 = csk->ha[5]; 2545 2546 l4kwqe->sa0 = dev->mac_addr[0]; 2547 l4kwqe->sa1 = dev->mac_addr[1]; 2548 l4kwqe->sa2 = dev->mac_addr[2]; 2549 l4kwqe->sa3 = dev->mac_addr[3]; 2550 l4kwqe->sa4 = dev->mac_addr[4]; 2551 l4kwqe->sa5 = dev->mac_addr[5]; 2552 2553 l4kwqe->etype = ETH_P_IP; 2554 l4kwqe->ipid_start = DEF_IPID_START; 2555 l4kwqe->host_opaque = csk->l5_cid; 2556 2557 if (csk->vlan_id) { 2558 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 2559 l4kwqe->vlan_tag = csk->vlan_id; 2560 l4kwqe->l2hdr_nbytes += 4; 2561 } 2562 2563 return dev->submit_kwqes(dev, wqes, 1); 2564} 2565 2566static int cnic_cm_update_pg(struct cnic_sock *csk) 2567{ 2568 struct cnic_dev *dev = csk->dev; 2569 struct l4_kwq_update_pg *l4kwqe; 2570 struct kwqe *wqes[1]; 2571 2572 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 2573 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2574 wqes[0] = (struct kwqe *) l4kwqe; 2575 2576 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 2577 l4kwqe->flags = 2578 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 2579 l4kwqe->pg_cid = csk->pg_cid; 2580 2581 l4kwqe->da0 = csk->ha[0]; 2582 l4kwqe->da1 = csk->ha[1]; 2583 l4kwqe->da2 = csk->ha[2]; 2584 l4kwqe->da3 = csk->ha[3]; 2585 l4kwqe->da4 = csk->ha[4]; 2586 l4kwqe->da5 = csk->ha[5]; 2587 2588 l4kwqe->pg_host_opaque = csk->l5_cid; 2589 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 2590 2591 return dev->submit_kwqes(dev, wqes, 1); 2592} 2593 2594static int cnic_cm_upload_pg(struct cnic_sock *csk) 2595{ 2596 struct cnic_dev *dev = csk->dev; 2597 struct l4_kwq_upload *l4kwqe; 2598 struct kwqe *wqes[1]; 2599 2600 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 2601 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2602 wqes[0] = (struct kwqe *) l4kwqe; 2603 2604 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 2605 l4kwqe->flags = 2606 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 2607 l4kwqe->cid = csk->pg_cid; 2608 2609 return dev->submit_kwqes(dev, wqes, 1); 2610} 2611 2612static int cnic_cm_conn_req(struct cnic_sock *csk) 2613{ 2614 struct cnic_dev *dev = csk->dev; 2615 struct l4_kwq_connect_req1 *l4kwqe1; 2616 struct l4_kwq_connect_req2 
*l4kwqe2; 2617 struct l4_kwq_connect_req3 *l4kwqe3; 2618 struct kwqe *wqes[3]; 2619 u8 tcp_flags = 0; 2620 int num_wqes = 2; 2621 2622 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 2623 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 2624 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 2625 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 2626 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 2627 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 2628 2629 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 2630 l4kwqe3->flags = 2631 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 2632 l4kwqe3->ka_timeout = csk->ka_timeout; 2633 l4kwqe3->ka_interval = csk->ka_interval; 2634 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 2635 l4kwqe3->tos = csk->tos; 2636 l4kwqe3->ttl = csk->ttl; 2637 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 2638 l4kwqe3->pmtu = csk->mtu; 2639 l4kwqe3->rcv_buf = csk->rcv_buf; 2640 l4kwqe3->snd_buf = csk->snd_buf; 2641 l4kwqe3->seed = csk->seed; 2642 2643 wqes[0] = (struct kwqe *) l4kwqe1; 2644 if (test_bit(SK_F_IPV6, &csk->flags)) { 2645 wqes[1] = (struct kwqe *) l4kwqe2; 2646 wqes[2] = (struct kwqe *) l4kwqe3; 2647 num_wqes = 3; 2648 2649 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 2650 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 2651 l4kwqe2->flags = 2652 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 2653 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 2654 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 2655 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 2656 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 2657 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 2658 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 2659 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 2660 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 2661 sizeof(struct tcphdr); 2662 } else { 2663 wqes[1] = (struct kwqe *) l4kwqe3; 2664 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 2665 sizeof(struct tcphdr); 2666 } 2667 2668 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 2669 l4kwqe1->flags = 2670 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 2671 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 2672 l4kwqe1->cid = csk->cid; 2673 l4kwqe1->pg_cid = csk->pg_cid; 2674 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 2675 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 2676 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 2677 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 2678 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 2679 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 2680 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 2681 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 2682 if (csk->tcp_flags & SK_TCP_NAGLE) 2683 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 2684 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 2685 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 2686 if (csk->tcp_flags & SK_TCP_SACK) 2687 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 2688 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 2689 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 2690 2691 l4kwqe1->tcp_flags = tcp_flags; 2692 2693 return dev->submit_kwqes(dev, wqes, num_wqes); 2694} 2695 2696static int cnic_cm_close_req(struct cnic_sock *csk) 2697{ 2698 struct cnic_dev *dev = csk->dev; 2699 struct l4_kwq_close_req *l4kwqe; 2700 struct kwqe *wqes[1]; 2701 2702 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 2703 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2704 wqes[0] = (struct kwqe *) l4kwqe; 2705 2706 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 2707 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 
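/* The close request needs only the connection ID; this single KWQE asks the firmware to begin a graceful close of the offloaded connection. */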
2708 l4kwqe->cid = csk->cid; 2709 2710 return dev->submit_kwqes(dev, wqes, 1); 2711} 2712 2713static int cnic_cm_abort_req(struct cnic_sock *csk) 2714{ 2715 struct cnic_dev *dev = csk->dev; 2716 struct l4_kwq_reset_req *l4kwqe; 2717 struct kwqe *wqes[1]; 2718 2719 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 2720 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2721 wqes[0] = (struct kwqe *) l4kwqe; 2722 2723 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 2724 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 2725 l4kwqe->cid = csk->cid; 2726 2727 return dev->submit_kwqes(dev, wqes, 1); 2728} 2729 2730static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 2731 u32 l5_cid, struct cnic_sock **csk, void *context) 2732{ 2733 struct cnic_local *cp = dev->cnic_priv; 2734 struct cnic_sock *csk1; 2735 2736 if (l5_cid >= MAX_CM_SK_TBL_SZ) 2737 return -EINVAL; 2738 2739 csk1 = &cp->csk_tbl[l5_cid]; 2740 if (atomic_read(&csk1->ref_count)) 2741 return -EAGAIN; 2742 2743 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 2744 return -EBUSY; 2745 2746 csk1->dev = dev; 2747 csk1->cid = cid; 2748 csk1->l5_cid = l5_cid; 2749 csk1->ulp_type = ulp_type; 2750 csk1->context = context; 2751 2752 csk1->ka_timeout = DEF_KA_TIMEOUT; 2753 csk1->ka_interval = DEF_KA_INTERVAL; 2754 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 2755 csk1->tos = DEF_TOS; 2756 csk1->ttl = DEF_TTL; 2757 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 2758 csk1->rcv_buf = DEF_RCV_BUF; 2759 csk1->snd_buf = DEF_SND_BUF; 2760 csk1->seed = DEF_SEED; 2761 2762 *csk = csk1; 2763 return 0; 2764} 2765 2766static void cnic_cm_cleanup(struct cnic_sock *csk) 2767{ 2768 if (csk->src_port) { 2769 struct cnic_dev *dev = csk->dev; 2770 struct cnic_local *cp = dev->cnic_priv; 2771 2772 cnic_free_id(&cp->csk_port_tbl, csk->src_port); 2773 csk->src_port = 0; 2774 } 2775} 2776 2777static void cnic_close_conn(struct cnic_sock *csk) 2778{ 2779 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 2780 cnic_cm_upload_pg(csk); 2781 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 2782 } 2783 cnic_cm_cleanup(csk); 2784} 2785 2786static int cnic_cm_destroy(struct cnic_sock *csk) 2787{ 2788 if (!cnic_in_use(csk)) 2789 return -EINVAL; 2790 2791 csk_hold(csk); 2792 clear_bit(SK_F_INUSE, &csk->flags); 2793 smp_mb__after_clear_bit(); 2794 while (atomic_read(&csk->ref_count) != 1) 2795 msleep(1); 2796 cnic_cm_cleanup(csk); 2797 2798 csk->flags = 0; 2799 csk_put(csk); 2800 return 0; 2801} 2802 2803static inline u16 cnic_get_vlan(struct net_device *dev, 2804 struct net_device **vlan_dev) 2805{ 2806 if (dev->priv_flags & IFF_802_1Q_VLAN) { 2807 *vlan_dev = vlan_dev_real_dev(dev); 2808 return vlan_dev_vlan_id(dev); 2809 } 2810 *vlan_dev = dev; 2811 return 0; 2812} 2813 2814static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 2815 struct dst_entry **dst) 2816{ 2817#if defined(CONFIG_INET) 2818 struct flowi fl; 2819 int err; 2820 struct rtable *rt; 2821 2822 memset(&fl, 0, sizeof(fl)); 2823 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; 2824 2825 err = ip_route_output_key(&init_net, &rt, &fl); 2826 if (!err) 2827 *dst = &rt->u.dst; 2828 return err; 2829#else 2830 return -ENETUNREACH; 2831#endif 2832} 2833 2834static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 2835 struct dst_entry **dst) 2836{ 2837#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 2838 struct flowi fl; 2839 2840 memset(&fl, 0, sizeof(fl)); 2841 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); 2842 if (ipv6_addr_type(&fl.fl6_dst) & 
IPV6_ADDR_LINKLOCAL) 2843 fl.oif = dst_addr->sin6_scope_id; 2844 2845 *dst = ip6_route_output(&init_net, NULL, &fl); 2846 if (*dst) 2847 return 0; 2848#endif 2849 2850 return -ENETUNREACH; 2851} 2852 2853static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 2854 int ulp_type) 2855{ 2856 struct cnic_dev *dev = NULL; 2857 struct dst_entry *dst; 2858 struct net_device *netdev = NULL; 2859 int err = -ENETUNREACH; 2860 2861 if (dst_addr->sin_family == AF_INET) 2862 err = cnic_get_v4_route(dst_addr, &dst); 2863 else if (dst_addr->sin_family == AF_INET6) { 2864 struct sockaddr_in6 *dst_addr6 = 2865 (struct sockaddr_in6 *) dst_addr; 2866 2867 err = cnic_get_v6_route(dst_addr6, &dst); 2868 } else 2869 return NULL; 2870 2871 if (err) 2872 return NULL; 2873 2874 if (!dst->dev) 2875 goto done; 2876 2877 cnic_get_vlan(dst->dev, &netdev); 2878 2879 dev = cnic_from_netdev(netdev); 2880 2881done: 2882 dst_release(dst); 2883 if (dev) 2884 cnic_put(dev); 2885 return dev; 2886} 2887 2888static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 2889{ 2890 struct cnic_dev *dev = csk->dev; 2891 struct cnic_local *cp = dev->cnic_priv; 2892 2893 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 2894} 2895 2896static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 2897{ 2898 struct cnic_dev *dev = csk->dev; 2899 struct cnic_local *cp = dev->cnic_priv; 2900 int is_v6, rc = 0; 2901 struct dst_entry *dst = NULL; 2902 struct net_device *realdev; 2903 u32 local_port; 2904 2905 if (saddr->local.v6.sin6_family == AF_INET6 && 2906 saddr->remote.v6.sin6_family == AF_INET6) 2907 is_v6 = 1; 2908 else if (saddr->local.v4.sin_family == AF_INET && 2909 saddr->remote.v4.sin_family == AF_INET) 2910 is_v6 = 0; 2911 else 2912 return -EINVAL; 2913 2914 clear_bit(SK_F_IPV6, &csk->flags); 2915 2916 if (is_v6) { 2917 set_bit(SK_F_IPV6, &csk->flags); 2918 cnic_get_v6_route(&saddr->remote.v6, &dst); 2919 2920 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 2921 sizeof(struct in6_addr)); 2922 csk->dst_port = saddr->remote.v6.sin6_port; 2923 local_port = saddr->local.v6.sin6_port; 2924 2925 } else { 2926 cnic_get_v4_route(&saddr->remote.v4, &dst); 2927 2928 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 2929 csk->dst_port = saddr->remote.v4.sin_port; 2930 local_port = saddr->local.v4.sin_port; 2931 } 2932 2933 csk->vlan_id = 0; 2934 csk->mtu = dev->netdev->mtu; 2935 if (dst && dst->dev) { 2936 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 2937 if (realdev == dev->netdev) { 2938 csk->vlan_id = vlan; 2939 csk->mtu = dst_mtu(dst); 2940 } 2941 } 2942 2943 if (local_port >= CNIC_LOCAL_PORT_MIN && 2944 local_port < CNIC_LOCAL_PORT_MAX) { 2945 if (cnic_alloc_id(&cp->csk_port_tbl, local_port)) 2946 local_port = 0; 2947 } else 2948 local_port = 0; 2949 2950 if (!local_port) { 2951 local_port = cnic_alloc_new_id(&cp->csk_port_tbl); 2952 if (local_port == -1) { 2953 rc = -ENOMEM; 2954 goto err_out; 2955 } 2956 } 2957 csk->src_port = local_port; 2958 2959err_out: 2960 dst_release(dst); 2961 return rc; 2962} 2963 2964static void cnic_init_csk_state(struct cnic_sock *csk) 2965{ 2966 csk->state = 0; 2967 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 2968 clear_bit(SK_F_CLOSING, &csk->flags); 2969} 2970 2971static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 2972{ 2973 int err = 0; 2974 2975 if (!cnic_in_use(csk)) 2976 return -EINVAL; 2977 2978 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 2979 return -EINVAL; 2980 2981 cnic_init_csk_state(csk); 
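/* Route lookup and local port allocation happen synchronously below; cnic_resolve_addr() then raises an ISCSI_KEVENT_PATH_REQ netlink event so userspace can resolve the neighbour, and the reply path (earlier in this file, outside this excerpt) continues the offload via cnic_cm_set_pg(). */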
2982 2983 err = cnic_get_route(csk, saddr); 2984 if (err) 2985 goto err_out; 2986 2987 err = cnic_resolve_addr(csk, saddr); 2988 if (!err) 2989 return 0; 2990 2991err_out: 2992 clear_bit(SK_F_CONNECT_START, &csk->flags); 2993 return err; 2994} 2995 2996static int cnic_cm_abort(struct cnic_sock *csk) 2997{ 2998 struct cnic_local *cp = csk->dev->cnic_priv; 2999 u32 opcode; 3000 3001 if (!cnic_in_use(csk)) 3002 return -EINVAL; 3003 3004 if (cnic_abort_prep(csk)) 3005 return cnic_cm_abort_req(csk); 3006 3007 /* Getting here means that we haven't started connect, or 3008 * connect was not successful. 3009 */ 3010 3011 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; 3012 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3013 opcode = csk->state; 3014 else 3015 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3016 cp->close_conn(csk, opcode); 3017 3018 return 0; 3019} 3020 3021static int cnic_cm_close(struct cnic_sock *csk) 3022{ 3023 if (!cnic_in_use(csk)) 3024 return -EINVAL; 3025 3026 if (cnic_close_prep(csk)) { 3027 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3028 return cnic_cm_close_req(csk); 3029 } 3030 return 0; 3031} 3032 3033static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3034 u8 opcode) 3035{ 3036 struct cnic_ulp_ops *ulp_ops; 3037 int ulp_type = csk->ulp_type; 3038 3039 rcu_read_lock(); 3040 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3041 if (ulp_ops) { 3042 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3043 ulp_ops->cm_connect_complete(csk); 3044 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3045 ulp_ops->cm_close_complete(csk); 3046 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3047 ulp_ops->cm_remote_abort(csk); 3048 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3049 ulp_ops->cm_abort_complete(csk); 3050 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3051 ulp_ops->cm_remote_close(csk); 3052 } 3053 rcu_read_unlock(); 3054} 3055 3056static int cnic_cm_set_pg(struct cnic_sock *csk) 3057{ 3058 if (cnic_offld_prep(csk)) { 3059 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3060 cnic_cm_update_pg(csk); 3061 else 3062 cnic_cm_offload_pg(csk); 3063 } 3064 return 0; 3065} 3066 3067static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3068{ 3069 struct cnic_local *cp = dev->cnic_priv; 3070 u32 l5_cid = kcqe->pg_host_opaque; 3071 u8 opcode = kcqe->op_code; 3072 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3073 3074 csk_hold(csk); 3075 if (!cnic_in_use(csk)) 3076 goto done; 3077 3078 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3079 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3080 goto done; 3081 } 3082 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3083 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3084 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3085 cnic_cm_upcall(cp, csk, 3086 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3087 goto done; 3088 } 3089 3090 csk->pg_cid = kcqe->pg_cid; 3091 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3092 cnic_cm_conn_req(csk); 3093 3094done: 3095 csk_put(csk); 3096} 3097 3098static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3099{ 3100 struct cnic_local *cp = dev->cnic_priv; 3101 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3102 u8 opcode = l4kcqe->op_code; 3103 u32 l5_cid; 3104 struct cnic_sock *csk; 3105 3106 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3107 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3108 cnic_cm_process_offld_pg(dev, l4kcqe); 3109 return; 3110 } 3111 3112 l5_cid = 
l4kcqe->conn_id; 3113 if (opcode & 0x80) 3114 l5_cid = l4kcqe->cid; 3115 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3116 return; 3117 3118 csk = &cp->csk_tbl[l5_cid]; 3119 csk_hold(csk); 3120 3121 if (!cnic_in_use(csk)) { 3122 csk_put(csk); 3123 return; 3124 } 3125 3126 switch (opcode) { 3127 case L5CM_RAMROD_CMD_ID_TCP_CONNECT: 3128 if (l4kcqe->status != 0) { 3129 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3130 cnic_cm_upcall(cp, csk, 3131 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3132 } 3133 break; 3134 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 3135 if (l4kcqe->status == 0) 3136 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 3137 3138 smp_mb__before_clear_bit(); 3139 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3140 cnic_cm_upcall(cp, csk, opcode); 3141 break; 3142 3143 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3144 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 3145 cnic_cm_upcall(cp, csk, opcode); 3146 break; 3147 } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) 3148 csk->state = opcode; 3149 /* fall through */ 3150 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3151 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3152 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3153 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3154 cp->close_conn(csk, opcode); 3155 break; 3156 3157 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: 3158 cnic_cm_upcall(cp, csk, opcode); 3159 break; 3160 } 3161 csk_put(csk); 3162} 3163 3164static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) 3165{ 3166 struct cnic_dev *dev = data; 3167 int i; 3168 3169 for (i = 0; i < num; i++) 3170 cnic_cm_process_kcqe(dev, kcqe[i]); 3171} 3172 3173static struct cnic_ulp_ops cm_ulp_ops = { 3174 .indicate_kcqes = cnic_cm_indicate_kcqe, 3175}; 3176 3177static void cnic_cm_free_mem(struct cnic_dev *dev) 3178{ 3179 struct cnic_local *cp = dev->cnic_priv; 3180 3181 kfree(cp->csk_tbl); 3182 cp->csk_tbl = NULL; 3183 cnic_free_id_tbl(&cp->csk_port_tbl); 3184} 3185 3186static int cnic_cm_alloc_mem(struct cnic_dev *dev) 3187{ 3188 struct cnic_local *cp = dev->cnic_priv; 3189 3190 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, 3191 GFP_KERNEL); 3192 if (!cp->csk_tbl) 3193 return -ENOMEM; 3194 3195 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 3196 CNIC_LOCAL_PORT_MIN)) { 3197 cnic_cm_free_mem(dev); 3198 return -ENOMEM; 3199 } 3200 return 0; 3201} 3202 3203static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 3204{ 3205 if ((opcode == csk->state) || 3206 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED && 3207 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) { 3208 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) 3209 return 1; 3210 } 3211 /* 57710+ only workaround to handle unsolicited RESET_COMP 3212 * which will be treated like a RESET RCVD notification 3213 * which triggers the clean up procedure 3214 */ 3215 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) { 3216 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 3217 csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 3218 return 1; 3219 } 3220 } 3221 return 0; 3222} 3223 3224static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) 3225{ 3226 struct cnic_dev *dev = csk->dev; 3227 struct cnic_local *cp = dev->cnic_priv; 3228 3229 clear_bit(SK_F_CONNECT_START, &csk->flags); 3230 cnic_close_conn(csk); 3231 cnic_cm_upcall(cp, csk, opcode); 3232} 3233 3234static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) 3235{ 3236} 3237 3238static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) 3239{ 3240 u32 seed; 3241 3242 get_random_bytes(&seed, 4); 3243 
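/* Write the random value into context 45 at offset 0; this appears to seed the firmware's TCP initial sequence number generation (the driver defines no symbolic name for this location). */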
cnic_ctx_wr(dev, 45, 0, seed); 3244 return 0; 3245} 3246 3247static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) 3248{ 3249 struct cnic_dev *dev = csk->dev; 3250 struct cnic_local *cp = dev->cnic_priv; 3251 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; 3252 union l5cm_specific_data l5_data; 3253 u32 cmd = 0; 3254 int close_complete = 0; 3255 3256 switch (opcode) { 3257 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3258 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3259 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3260 if (cnic_ready_to_close(csk, opcode)) 3261 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 3262 break; 3263 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3264 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3265 break; 3266 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3267 close_complete = 1; 3268 break; 3269 } 3270 if (cmd) { 3271 memset(&l5_data, 0, sizeof(l5_data)); 3272 3273 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, 3274 &l5_data); 3275 } else if (close_complete) { 3276 ctx->timestamp = jiffies; 3277 cnic_close_conn(csk); 3278 cnic_cm_upcall(cp, csk, csk->state); 3279 } 3280} 3281 3282static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) 3283{ 3284} 3285 3286static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 3287{ 3288 struct cnic_local *cp = dev->cnic_priv; 3289 int func = CNIC_FUNC(cp); 3290 3291 cnic_init_bnx2x_mac(dev); 3292 cnic_bnx2x_set_tcp_timestamp(dev, 1); 3293 3294 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 3295 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0); 3296 3297 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3298 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1); 3299 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3300 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), 3301 DEF_MAX_DA_COUNT); 3302 3303 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3304 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL); 3305 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3306 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS); 3307 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3308 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2); 3309 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3310 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER); 3311 3312 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func), 3313 DEF_MAX_CWND); 3314 return 0; 3315} 3316 3317static int cnic_cm_open(struct cnic_dev *dev) 3318{ 3319 struct cnic_local *cp = dev->cnic_priv; 3320 int err; 3321 3322 err = cnic_cm_alloc_mem(dev); 3323 if (err) 3324 return err; 3325 3326 err = cp->start_cm(dev); 3327 3328 if (err) 3329 goto err_out; 3330 3331 dev->cm_create = cnic_cm_create; 3332 dev->cm_destroy = cnic_cm_destroy; 3333 dev->cm_connect = cnic_cm_connect; 3334 dev->cm_abort = cnic_cm_abort; 3335 dev->cm_close = cnic_cm_close; 3336 dev->cm_select_dev = cnic_cm_select_dev; 3337 3338 cp->ulp_handle[CNIC_ULP_L4] = dev; 3339 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 3340 return 0; 3341 3342err_out: 3343 cnic_cm_free_mem(dev); 3344 return err; 3345} 3346 3347static int cnic_cm_shutdown(struct cnic_dev *dev) 3348{ 3349 struct cnic_local *cp = dev->cnic_priv; 3350 int i; 3351 3352 cp->stop_cm(dev); 3353 3354 if (!cp->csk_tbl) 3355 return 0; 3356 3357 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 3358 struct cnic_sock *csk = &cp->csk_tbl[i]; 3359 3360 clear_bit(SK_F_INUSE, &csk->flags); 3361 cnic_cm_cleanup(csk); 3362 } 3363 cnic_cm_free_mem(dev); 3364 3365 return 0; 3366} 3367 3368static void cnic_init_context(struct cnic_dev *dev, u32 cid) 3369{ 3370 struct cnic_local *cp = dev->cnic_priv; 3371 u32 cid_addr; 3372 int i; 3373 3374 if 
(CHIP_NUM(cp) == CHIP_NUM_5709) 3375 return; 3376 3377 cid_addr = GET_CID_ADDR(cid); 3378 3379 for (i = 0; i < CTX_SIZE; i += 4) 3380 cnic_ctx_wr(dev, cid_addr, i, 0); 3381} 3382 3383static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 3384{ 3385 struct cnic_local *cp = dev->cnic_priv; 3386 int ret = 0, i; 3387 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 3388 3389 if (CHIP_NUM(cp) != CHIP_NUM_5709) 3390 return 0; 3391 3392 for (i = 0; i < cp->ctx_blks; i++) { 3393 int j; 3394 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 3395 u32 val; 3396 3397 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 3398 3399 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 3400 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 3401 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 3402 (u64) cp->ctx_arr[i].mapping >> 32); 3403 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 3404 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3405 for (j = 0; j < 10; j++) { 3406 3407 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 3408 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 3409 break; 3410 udelay(5); 3411 } 3412 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 3413 ret = -EBUSY; 3414 break; 3415 } 3416 } 3417 return ret; 3418} 3419 3420static void cnic_free_irq(struct cnic_dev *dev) 3421{ 3422 struct cnic_local *cp = dev->cnic_priv; 3423 struct cnic_eth_dev *ethdev = cp->ethdev; 3424 3425 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3426 cp->disable_int_sync(dev); 3427 tasklet_disable(&cp->cnic_irq_task); 3428 free_irq(ethdev->irq_arr[0].vector, dev); 3429 } 3430} 3431 3432static int cnic_init_bnx2_irq(struct cnic_dev *dev) 3433{ 3434 struct cnic_local *cp = dev->cnic_priv; 3435 struct cnic_eth_dev *ethdev = cp->ethdev; 3436 3437 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3438 int err, i = 0; 3439 int sblk_num = cp->status_blk_num; 3440 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 3441 BNX2_HC_SB_CONFIG_1; 3442 3443 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 3444 3445 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 3446 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 3447 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 3448 3449 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 3450 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 3451 (unsigned long) dev); 3452 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 3453 "cnic", dev); 3454 if (err) { 3455 tasklet_disable(&cp->cnic_irq_task); 3456 return err; 3457 } 3458 while (cp->status_blk.bnx2->status_completion_producer_index && 3459 i < 10) { 3460 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 3461 1 << (11 + sblk_num)); 3462 udelay(10); 3463 i++; 3464 barrier(); 3465 } 3466 if (cp->status_blk.bnx2->status_completion_producer_index) { 3467 cnic_free_irq(dev); 3468 goto failed; 3469 } 3470 3471 } else { 3472 struct status_block *sblk = cp->status_blk.gen; 3473 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 3474 int i = 0; 3475 3476 while (sblk->status_completion_producer_index && i < 10) { 3477 CNIC_WR(dev, BNX2_HC_COMMAND, 3478 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 3479 udelay(10); 3480 i++; 3481 barrier(); 3482 } 3483 if (sblk->status_completion_producer_index) 3484 goto failed; 3485 3486 } 3487 return 0; 3488 3489failed: 3490 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 3491 return -EBUSY; 3492} 3493 3494static void cnic_enable_bnx2_int(struct cnic_dev *dev) 3495{ 3496 struct cnic_local *cp = dev->cnic_priv; 3497 struct cnic_eth_dev *ethdev = 
cp->ethdev; 3498 3499 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3500 return; 3501 3502 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 3503 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 3504} 3505 3506static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 3507{ 3508 struct cnic_local *cp = dev->cnic_priv; 3509 struct cnic_eth_dev *ethdev = cp->ethdev; 3510 3511 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3512 return; 3513 3514 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 3515 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 3516 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 3517 synchronize_irq(ethdev->irq_arr[0].vector); 3518} 3519 3520static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 3521{ 3522 struct cnic_local *cp = dev->cnic_priv; 3523 struct cnic_eth_dev *ethdev = cp->ethdev; 3524 u32 cid_addr, tx_cid, sb_id; 3525 u32 val, offset0, offset1, offset2, offset3; 3526 int i; 3527 struct tx_bd *txbd; 3528 dma_addr_t buf_map; 3529 struct status_block *s_blk = cp->status_blk.gen; 3530 3531 sb_id = cp->status_blk_num; 3532 tx_cid = 20; 3533 cnic_init_context(dev, tx_cid); 3534 cnic_init_context(dev, tx_cid + 1); 3535 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 3536 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3537 struct status_block_msix *sblk = cp->status_blk.bnx2; 3538 3539 tx_cid = TX_TSS_CID + sb_id - 1; 3540 cnic_init_context(dev, tx_cid); 3541 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 3542 (TX_TSS_CID << 7)); 3543 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 3544 } 3545 cp->tx_cons = *cp->tx_cons_ptr; 3546 3547 cid_addr = GET_CID_ADDR(tx_cid); 3548 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 3549 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 3550 3551 for (i = 0; i < PHY_CTX_SIZE; i += 4) 3552 cnic_ctx_wr(dev, cid_addr2, i, 0); 3553 3554 offset0 = BNX2_L2CTX_TYPE_XI; 3555 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 3556 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 3557 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 3558 } else { 3559 offset0 = BNX2_L2CTX_TYPE; 3560 offset1 = BNX2_L2CTX_CMD_TYPE; 3561 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 3562 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 3563 } 3564 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 3565 cnic_ctx_wr(dev, cid_addr, offset0, val); 3566 3567 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3568 cnic_ctx_wr(dev, cid_addr, offset1, val); 3569 3570 txbd = (struct tx_bd *) cp->l2_ring; 3571 3572 buf_map = cp->l2_buf_map; 3573 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 3574 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 3575 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3576 } 3577 val = (u64) cp->l2_ring_map >> 32; 3578 cnic_ctx_wr(dev, cid_addr, offset2, val); 3579 txbd->tx_bd_haddr_hi = val; 3580 3581 val = (u64) cp->l2_ring_map & 0xffffffff; 3582 cnic_ctx_wr(dev, cid_addr, offset3, val); 3583 txbd->tx_bd_haddr_lo = val; 3584} 3585 3586static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 3587{ 3588 struct cnic_local *cp = dev->cnic_priv; 3589 struct cnic_eth_dev *ethdev = cp->ethdev; 3590 u32 cid_addr, sb_id, val, coal_reg, coal_val; 3591 int i; 3592 struct rx_bd *rxbd; 3593 struct status_block *s_blk = cp->status_blk.gen; 3594 3595 sb_id = cp->status_blk_num; 3596 cnic_init_context(dev, 2); 3597 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 3598 coal_reg = BNX2_HC_COMMAND; 3599 coal_val = CNIC_RD(dev, coal_reg); 3600 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3601 struct status_block_msix *sblk = cp->status_blk.bnx2; 3602 3603 cp->rx_cons_ptr 
= &sblk->status_rx_quick_consumer_index; 3604 coal_reg = BNX2_HC_COALESCE_NOW; 3605 coal_val = 1 << (11 + sb_id); 3606 } 3607 i = 0; 3608 while (!(*cp->rx_cons_ptr != 0) && i < 10) { 3609 CNIC_WR(dev, coal_reg, coal_val); 3610 udelay(10); 3611 i++; 3612 barrier(); 3613 } 3614 cp->rx_cons = *cp->rx_cons_ptr; 3615 3616 cid_addr = GET_CID_ADDR(2); 3617 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 3618 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 3619 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 3620 3621 if (sb_id == 0) 3622 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 3623 else 3624 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 3625 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 3626 3627 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); 3628 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 3629 dma_addr_t buf_map; 3630 int n = (i % cp->l2_rx_ring_size) + 1; 3631 3632 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); 3633 rxbd->rx_bd_len = cp->l2_single_buf_size; 3634 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 3635 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 3636 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3637 } 3638 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; 3639 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 3640 rxbd->rx_bd_haddr_hi = val; 3641 3642 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; 3643 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 3644 rxbd->rx_bd_haddr_lo = val; 3645 3646 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 3647 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 3648} 3649 3650static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 3651{ 3652 struct kwqe *wqes[1], l2kwqe; 3653 3654 memset(&l2kwqe, 0, sizeof(l2kwqe)); 3655 wqes[0] = &l2kwqe; 3656 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) | 3657 (L2_KWQE_OPCODE_VALUE_FLUSH << 3658 KWQE_OPCODE_SHIFT) | 2; 3659 dev->submit_kwqes(dev, wqes, 1); 3660} 3661 3662static void cnic_set_bnx2_mac(struct cnic_dev *dev) 3663{ 3664 struct cnic_local *cp = dev->cnic_priv; 3665 u32 val; 3666 3667 val = cp->func << 2; 3668 3669 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 3670 3671 val = cnic_reg_rd_ind(dev, cp->shmem_base + 3672 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 3673 dev->mac_addr[0] = (u8) (val >> 8); 3674 dev->mac_addr[1] = (u8) val; 3675 3676 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 3677 3678 val = cnic_reg_rd_ind(dev, cp->shmem_base + 3679 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 3680 dev->mac_addr[2] = (u8) (val >> 24); 3681 dev->mac_addr[3] = (u8) (val >> 16); 3682 dev->mac_addr[4] = (u8) (val >> 8); 3683 dev->mac_addr[5] = (u8) val; 3684 3685 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 3686 3687 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 3688 if (CHIP_NUM(cp) != CHIP_NUM_5709) 3689 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 3690 3691 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 3692 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 3693 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 3694} 3695 3696static int cnic_start_bnx2_hw(struct cnic_dev *dev) 3697{ 3698 struct cnic_local *cp = dev->cnic_priv; 3699 struct cnic_eth_dev *ethdev = cp->ethdev; 3700 struct status_block *sblk = cp->status_blk.gen; 3701 u32 val; 3702 int err; 3703 3704 cnic_set_bnx2_mac(dev); 3705 3706 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 3707 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 3708 if (BCM_PAGE_BITS > 12) 3709 val |= (12 - 8) << 4; 3710 else 3711 val |= (BCM_PAGE_BITS - 8) << 4; 3712 3713 CNIC_WR(dev, 
BNX2_MQ_CONFIG, val); 3714 3715 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); 3716 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); 3717 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); 3718 3719 err = cnic_setup_5709_context(dev, 1); 3720 if (err) 3721 return err; 3722 3723 cnic_init_context(dev, KWQ_CID); 3724 cnic_init_context(dev, KCQ_CID); 3725 3726 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID); 3727 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; 3728 3729 cp->max_kwq_idx = MAX_KWQ_IDX; 3730 cp->kwq_prod_idx = 0; 3731 cp->kwq_con_idx = 0; 3732 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); 3733 3734 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) 3735 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; 3736 else 3737 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; 3738 3739 /* Initialize the kernel work queue context. */ 3740 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3741 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3742 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val); 3743 3744 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 3745 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3746 3747 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 3748 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3749 3750 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 3751 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3752 3753 val = (u32) cp->kwq_info.pgtbl_map; 3754 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3755 3756 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID); 3757 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; 3758 3759 cp->kcq_prod_idx = 0; 3760 3761 /* Initialize the kernel complete queue context. */ 3762 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3763 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3764 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val); 3765 3766 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 3767 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3768 3769 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 3770 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3771 3772 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32); 3773 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3774 3775 val = (u32) cp->kcq_info.pgtbl_map; 3776 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3777 3778 cp->int_num = 0; 3779 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3780 u32 sb_id = cp->status_blk_num; 3781 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 3782 3783 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 3784 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3785 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3786 } 3787 3788 /* Enable Command Scheduler notification when we write to the 3789 * host producer index of the kernel contexts. */ 3790 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2); 3791 3792 /* Enable Command Scheduler notification when we write to either 3793 * the Send Queue or Receive Queue producer indexes of the kernel 3794 * bypass contexts. */ 3795 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7); 3796 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7); 3797 3798 /* Notify COM when the driver posts an application buffer. */ 3799 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000); 3800 3801 /* Set the CP and COM doorbells.
These two processors poll the 3802 * doorbell for a non-zero value before running. This must be done 3803 * after setting up the kernel queue contexts. */ 3804 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); 3805 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); 3806 3807 cnic_init_bnx2_tx_ring(dev); 3808 cnic_init_bnx2_rx_ring(dev); 3809 3810 err = cnic_init_bnx2_irq(dev); 3811 if (err) { 3812 netdev_err(dev->netdev, "cnic_init_irq failed\n"); 3813 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 3814 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 3815 return err; 3816 } 3817 3818 return 0; 3819} 3820 3821static void cnic_setup_bnx2x_context(struct cnic_dev *dev) 3822{ 3823 struct cnic_local *cp = dev->cnic_priv; 3824 struct cnic_eth_dev *ethdev = cp->ethdev; 3825 u32 start_offset = ethdev->ctx_tbl_offset; 3826 int i; 3827 3828 for (i = 0; i < cp->ctx_blks; i++) { 3829 struct cnic_ctx *ctx = &cp->ctx_arr[i]; 3830 dma_addr_t map = ctx->mapping; 3831 3832 if (cp->ctx_align) { 3833 unsigned long mask = cp->ctx_align - 1; 3834 3835 map = (map + mask) & ~mask; 3836 } 3837 3838 cnic_ctx_tbl_wr(dev, start_offset + i, map); 3839 } 3840} 3841 3842static int cnic_init_bnx2x_irq(struct cnic_dev *dev) 3843{ 3844 struct cnic_local *cp = dev->cnic_priv; 3845 struct cnic_eth_dev *ethdev = cp->ethdev; 3846 int err = 0; 3847 3848 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, 3849 (unsigned long) dev); 3850 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3851 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 3852 "cnic", dev); 3853 if (err) 3854 tasklet_disable(&cp->cnic_irq_task); 3855 } 3856 return err; 3857} 3858 3859static void cnic_enable_bnx2x_int(struct cnic_dev *dev) 3860{ 3861 struct cnic_local *cp = dev->cnic_priv; 3862 u8 sb_id = cp->status_blk_num; 3863 int port = CNIC_PORT(cp); 3864 3865 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 3866 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, 3867 HC_INDEX_C_ISCSI_EQ_CONS), 3868 64 / 12); 3869 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 3870 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, 3871 HC_INDEX_C_ISCSI_EQ_CONS), 0); 3872} 3873 3874static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) 3875{ 3876} 3877 3878static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) 3879{ 3880 struct cnic_local *cp = dev->cnic_priv; 3881 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; 3882 struct eth_context *context; 3883 struct regpair context_addr; 3884 dma_addr_t buf_map; 3885 int func = CNIC_FUNC(cp); 3886 int port = CNIC_PORT(cp); 3887 int i; 3888 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 3889 u32 val; 3890 3891 memset(txbd, 0, BCM_PAGE_SIZE); 3892 3893 buf_map = cp->l2_buf_map; 3894 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 3895 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 3896 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 3897 3898 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 3899 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 3900 reg_bd->addr_hi = start_bd->addr_hi; 3901 reg_bd->addr_lo = start_bd->addr_lo + 0x10; 3902 start_bd->nbytes = cpu_to_le16(0x10); 3903 start_bd->nbd = cpu_to_le16(3); 3904 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 3905 start_bd->general_data = (UNICAST_ADDRESS << 3906 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 3907 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 3908 3909 } 3910 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr); 3911 3912 val = (u64) cp->l2_ring_map >> 32; 3913 txbd->next_bd.addr_hi =
cpu_to_le32(val); 3914 3915 context->xstorm_st_context.tx_bd_page_base_hi = val; 3916 3917 val = (u64) cp->l2_ring_map & 0xffffffff; 3918 txbd->next_bd.addr_lo = cpu_to_le32(val); 3919 3920 context->xstorm_st_context.tx_bd_page_base_lo = val; 3921 3922 context->cstorm_st_context.sb_index_number = 3923 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS; 3924 context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID; 3925 3926 context->xstorm_st_context.statistics_data = (cli | 3927 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE); 3928 3929 context->xstorm_ag_context.cdu_reserved = 3930 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), 3931 CDU_REGION_NUMBER_XCM_AG, 3932 ETH_CONNECTION_TYPE); 3933 3934 /* reset xstorm per client statistics */ 3935 val = BAR_XSTRORM_INTMEM + 3936 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 3937 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) 3938 CNIC_WR(dev, val + i * 4, 0); 3939 3940 cp->tx_cons_ptr = 3941 &cp->bnx2x_def_status_blk->c_def_status_block.index_values[ 3942 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]; 3943} 3944 3945static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) 3946{ 3947 struct cnic_local *cp = dev->cnic_priv; 3948 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + 3949 BCM_PAGE_SIZE); 3950 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 3951 (cp->l2_ring + (2 * BCM_PAGE_SIZE)); 3952 struct eth_context *context; 3953 struct regpair context_addr; 3954 int i; 3955 int port = CNIC_PORT(cp); 3956 int func = CNIC_FUNC(cp); 3957 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 3958 u32 val; 3959 struct tstorm_eth_client_config tstorm_client = {0}; 3960 3961 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 3962 dma_addr_t buf_map; 3963 int n = (i % cp->l2_rx_ring_size) + 1; 3964 3965 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); 3966 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 3967 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 3968 } 3969 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr); 3970 3971 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; 3972 rxbd->addr_hi = cpu_to_le32(val); 3973 3974 context->ustorm_st_context.common.bd_page_base_hi = val; 3975 3976 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; 3977 rxbd->addr_lo = cpu_to_le32(val); 3978 3979 context->ustorm_st_context.common.bd_page_base_lo = val; 3980 3981 context->ustorm_st_context.common.sb_index_numbers = 3982 BNX2X_ISCSI_RX_SB_INDEX_NUM; 3983 context->ustorm_st_context.common.clientId = cli; 3984 context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID; 3985 context->ustorm_st_context.common.flags = 3986 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS; 3987 context->ustorm_st_context.common.statistics_counter_id = cli; 3988 context->ustorm_st_context.common.mc_alignment_log_size = 0; 3989 context->ustorm_st_context.common.bd_buff_size = 3990 cp->l2_single_buf_size; 3991 3992 context->ustorm_ag_context.cdu_usage = 3993 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), 3994 CDU_REGION_NUMBER_UCM_AG, 3995 ETH_CONNECTION_TYPE); 3996 3997 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 3998 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 3999 rxcqe->addr_hi = cpu_to_le32(val); 4000 4001 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4002 USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val); 4003 4004 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4005 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val); 4006 4007 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; 4008 rxcqe->addr_lo 
= cpu_to_le32(val); 4009 4010 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4011 USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); 4012 4013 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4014 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val); 4015 4016 /* client tstorm info */ 4017 tstorm_client.mtu = cp->l2_single_buf_size - 14; 4018 tstorm_client.config_flags = 4019 (TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE | 4020 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE); 4021 tstorm_client.statistics_counter_id = cli; 4022 4023 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4024 TSTORM_CLIENT_CONFIG_OFFSET(port, cli), 4025 ((u32 *)&tstorm_client)[0]); 4026 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4027 TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4, 4028 ((u32 *)&tstorm_client)[1]); 4029 4030 /* reset tstorm per client statistics */ 4031 val = BAR_TSTRORM_INTMEM + 4032 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4033 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) 4034 CNIC_WR(dev, val + i * 4, 0); 4035 4036 /* reset ustorm per client statistics */ 4037 val = BAR_USTRORM_INTMEM + 4038 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4039 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) 4040 CNIC_WR(dev, val + i * 4, 0); 4041 4042 cp->rx_cons_ptr = 4043 &cp->bnx2x_def_status_blk->u_def_status_block.index_values[ 4044 HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]; 4045} 4046 4047static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) 4048{ 4049 struct cnic_local *cp = dev->cnic_priv; 4050 u32 base, addr, val; 4051 int port = CNIC_PORT(cp); 4052 4053 dev->max_iscsi_conn = 0; 4054 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); 4055 if (base < 0xa0000 || base >= 0xc0000) 4056 return; 4057 4058 addr = BNX2X_SHMEM_ADDR(base, 4059 dev_info.port_hw_config[port].iscsi_mac_upper); 4060 4061 val = CNIC_RD(dev, addr); 4062 4063 dev->mac_addr[0] = (u8) (val >> 8); 4064 dev->mac_addr[1] = (u8) val; 4065 4066 addr = BNX2X_SHMEM_ADDR(base, 4067 dev_info.port_hw_config[port].iscsi_mac_lower); 4068 4069 val = CNIC_RD(dev, addr); 4070 4071 dev->mac_addr[2] = (u8) (val >> 24); 4072 dev->mac_addr[3] = (u8) (val >> 16); 4073 dev->mac_addr[4] = (u8) (val >> 8); 4074 dev->mac_addr[5] = (u8) val; 4075 4076 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]); 4077 val = CNIC_RD(dev, addr); 4078 4079 if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) { 4080 u16 val16; 4081 4082 addr = BNX2X_SHMEM_ADDR(base, 4083 drv_lic_key[port].max_iscsi_init_conn); 4084 val16 = CNIC_RD16(dev, addr); 4085 4086 if (val16) 4087 val16 ^= 0x1e1e; 4088 dev->max_iscsi_conn = val16; 4089 } 4090 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { 4091 int func = CNIC_FUNC(cp); 4092 4093 addr = BNX2X_SHMEM_ADDR(base, 4094 mf_cfg.func_mf_config[func].e1hov_tag); 4095 val = CNIC_RD(dev, addr); 4096 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 4097 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 4098 addr = BNX2X_SHMEM_ADDR(base, 4099 mf_cfg.func_mf_config[func].config); 4100 val = CNIC_RD(dev, addr); 4101 val &= FUNC_MF_CFG_PROTOCOL_MASK; 4102 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) 4103 dev->max_iscsi_conn = 0; 4104 } 4105 } 4106} 4107 4108static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4109{ 4110 struct cnic_local *cp = dev->cnic_priv; 4111 int func = CNIC_FUNC(cp), ret, i; 4112 int port = CNIC_PORT(cp); 4113 u16 eq_idx; 4114 u8 sb_id = cp->status_blk_num; 4115 4116 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 4117 BNX2X_ISCSI_START_CID); 4118 4119 if (ret) 4120 return -ENOMEM; 4121 4122 cp->kcq_io_addr = BAR_CSTRORM_INTMEM + 4123 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); 4124 cp->kcq_prod_idx = 
0; 4125 4126 cnic_get_bnx2x_iscsi_info(dev); 4127 4128 /* Only 1 EQ */ 4129 CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX); 4130 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4131 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); 4132 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4133 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), 4134 cp->kcq_info.pg_map_arr[1] & 0xffffffff); 4135 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4136 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, 4137 (u64) cp->kcq_info.pg_map_arr[1] >> 32); 4138 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4139 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), 4140 cp->kcq_info.pg_map_arr[0] & 0xffffffff); 4141 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4142 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, 4143 (u64) cp->kcq_info.pg_map_arr[0] >> 32); 4144 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4145 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); 4146 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4147 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num); 4148 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4149 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0), 4150 HC_INDEX_C_ISCSI_EQ_CONS); 4151 4152 for (i = 0; i < cp->conn_buf_info.num_pages; i++) { 4153 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4154 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i), 4155 cp->conn_buf_info.pgtbl[2 * i]); 4156 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4157 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4, 4158 cp->conn_buf_info.pgtbl[(2 * i) + 1]); 4159 } 4160 4161 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4162 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), 4163 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 4164 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4165 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4, 4166 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 4167 4168 cnic_setup_bnx2x_context(dev); 4169 4170 eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM + 4171 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + 4172 offsetof(struct cstorm_status_block_c, 4173 index_values[HC_INDEX_C_ISCSI_EQ_CONS])); 4174 if (eq_idx != 0) { 4175 netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx); 4176 return -EBUSY; 4177 } 4178 ret = cnic_init_bnx2x_irq(dev); 4179 if (ret) 4180 return ret; 4181 4182 cnic_init_bnx2x_tx_ring(dev); 4183 cnic_init_bnx2x_rx_ring(dev); 4184 4185 return 0; 4186} 4187 4188static void cnic_init_rings(struct cnic_dev *dev) 4189{ 4190 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4191 cnic_init_bnx2_tx_ring(dev); 4192 cnic_init_bnx2_rx_ring(dev); 4193 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4194 struct cnic_local *cp = dev->cnic_priv; 4195 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4196 union l5cm_specific_data l5_data; 4197 struct ustorm_eth_rx_producers rx_prods = {0}; 4198 u32 off, i; 4199 4200 rx_prods.bd_prod = 0; 4201 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 4202 barrier(); 4203 4204 off = BAR_USTRORM_INTMEM + 4205 USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); 4206 4207 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4208 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4209 4210 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4211 4212 cnic_init_bnx2x_tx_ring(dev); 4213 cnic_init_bnx2x_rx_ring(dev); 4214 4215 l5_data.phy_address.lo = cli; 4216 l5_data.phy_address.hi = 0; 4217 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4218 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4219 i = 0; 4220 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4221 ++i < 10) 4222 msleep(1); 4223 4224 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4225 netdev_err(dev->netdev, 
4226 "iSCSI CLIENT_SETUP did not complete\n"); 4227 cnic_kwq_completion(dev, 1); 4228 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); 4229 } 4230} 4231 4232static void cnic_shutdown_rings(struct cnic_dev *dev) 4233{ 4234 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4235 cnic_shutdown_bnx2_rx_ring(dev); 4236 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4237 struct cnic_local *cp = dev->cnic_priv; 4238 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4239 union l5cm_specific_data l5_data; 4240 int i; 4241 4242 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); 4243 4244 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4245 4246 l5_data.phy_address.lo = cli; 4247 l5_data.phy_address.hi = 0; 4248 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 4249 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4250 i = 0; 4251 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4252 ++i < 10) 4253 msleep(1); 4254 4255 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4256 netdev_err(dev->netdev, 4257 "iSCSI CLIENT_HALT did not complete\n"); 4258 cnic_kwq_completion(dev, 1); 4259 4260 memset(&l5_data, 0, sizeof(l5_data)); 4261 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 4262 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE | 4263 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data); 4264 msleep(10); 4265 } 4266} 4267 4268static int cnic_register_netdev(struct cnic_dev *dev) 4269{ 4270 struct cnic_local *cp = dev->cnic_priv; 4271 struct cnic_eth_dev *ethdev = cp->ethdev; 4272 int err; 4273 4274 if (!ethdev) 4275 return -ENODEV; 4276 4277 if (ethdev->drv_state & CNIC_DRV_STATE_REGD) 4278 return 0; 4279 4280 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 4281 if (err) 4282 netdev_err(dev->netdev, "register_cnic failed\n"); 4283 4284 return err; 4285} 4286 4287static void cnic_unregister_netdev(struct cnic_dev *dev) 4288{ 4289 struct cnic_local *cp = dev->cnic_priv; 4290 struct cnic_eth_dev *ethdev = cp->ethdev; 4291 4292 if (!ethdev) 4293 return; 4294 4295 ethdev->drv_unregister_cnic(dev->netdev); 4296} 4297 4298static int cnic_start_hw(struct cnic_dev *dev) 4299{ 4300 struct cnic_local *cp = dev->cnic_priv; 4301 struct cnic_eth_dev *ethdev = cp->ethdev; 4302 int err; 4303 4304 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 4305 return -EALREADY; 4306 4307 dev->regview = ethdev->io_base; 4308 cp->chip_id = ethdev->chip_id; 4309 pci_dev_get(dev->pcidev); 4310 cp->func = PCI_FUNC(dev->pcidev->devfn); 4311 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 4312 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 4313 4314 err = cp->alloc_resc(dev); 4315 if (err) { 4316 netdev_err(dev->netdev, "allocate resource failure\n"); 4317 goto err1; 4318 } 4319 4320 err = cp->start_hw(dev); 4321 if (err) 4322 goto err1; 4323 4324 err = cnic_cm_open(dev); 4325 if (err) 4326 goto err1; 4327 4328 set_bit(CNIC_F_CNIC_UP, &dev->flags); 4329 4330 cp->enable_int(dev); 4331 4332 return 0; 4333 4334err1: 4335 cp->free_resc(dev); 4336 pci_dev_put(dev->pcidev); 4337 return err; 4338} 4339 4340static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 4341{ 4342 cnic_disable_bnx2_int_sync(dev); 4343 4344 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 4345 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 4346 4347 cnic_init_context(dev, KWQ_CID); 4348 cnic_init_context(dev, KCQ_CID); 4349 4350 cnic_setup_5709_context(dev, 0); 4351 cnic_free_irq(dev); 4352 4353 cnic_free_resc(dev); 4354} 4355 4356 4357static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 4358{ 4359 struct cnic_local *cp = 
dev->cnic_priv; 4360 u8 sb_id = cp->status_blk_num; 4361 int port = CNIC_PORT(cp); 4362 4363 cnic_free_irq(dev); 4364 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4365 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + 4366 offsetof(struct cstorm_status_block_c, 4367 index_values[HC_INDEX_C_ISCSI_EQ_CONS]), 4368 0); 4369 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4370 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0); 4371 CNIC_WR16(dev, cp->kcq_io_addr, 0); 4372 cnic_free_resc(dev); 4373} 4374 4375static void cnic_stop_hw(struct cnic_dev *dev) 4376{ 4377 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4378 struct cnic_local *cp = dev->cnic_priv; 4379 int i = 0; 4380 4381 /* Need to wait for the ring shutdown event to complete 4382 * before clearing the CNIC_UP flag. 4383 */ 4384 while (cp->uio_dev != -1 && i < 15) { 4385 msleep(100); 4386 i++; 4387 } 4388 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 4389 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); 4390 synchronize_rcu(); 4391 cnic_cm_shutdown(dev); 4392 cp->stop_hw(dev); 4393 pci_dev_put(dev->pcidev); 4394 } 4395} 4396 4397static void cnic_free_dev(struct cnic_dev *dev) 4398{ 4399 int i = 0; 4400 4401 while ((atomic_read(&dev->ref_count) != 0) && i < 10) { 4402 msleep(100); 4403 i++; 4404 } 4405 if (atomic_read(&dev->ref_count) != 0) 4406 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); 4407 4408 netdev_info(dev->netdev, "Removed CNIC device\n"); 4409 dev_put(dev->netdev); 4410 kfree(dev); 4411} 4412 4413static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, 4414 struct pci_dev *pdev) 4415{ 4416 struct cnic_dev *cdev; 4417 struct cnic_local *cp; 4418 int alloc_size; 4419 4420 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); 4421 4422 cdev = kzalloc(alloc_size , GFP_KERNEL); 4423 if (cdev == NULL) { 4424 netdev_err(dev, "allocate dev struct failure\n"); 4425 return NULL; 4426 } 4427 4428 cdev->netdev = dev; 4429 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); 4430 cdev->register_device = cnic_register_device; 4431 cdev->unregister_device = cnic_unregister_device; 4432 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; 4433 4434 cp = cdev->cnic_priv; 4435 cp->dev = cdev; 4436 cp->uio_dev = -1; 4437 cp->l2_single_buf_size = 0x400; 4438 cp->l2_rx_ring_size = 3; 4439 4440 spin_lock_init(&cp->cnic_ulp_lock); 4441 4442 netdev_info(dev, "Added CNIC device\n"); 4443 4444 return cdev; 4445} 4446 4447static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) 4448{ 4449 struct pci_dev *pdev; 4450 struct cnic_dev *cdev; 4451 struct cnic_local *cp; 4452 struct cnic_eth_dev *ethdev = NULL; 4453 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 4454 4455 probe = symbol_get(bnx2_cnic_probe); 4456 if (probe) { 4457 ethdev = (*probe)(dev); 4458 symbol_put(bnx2_cnic_probe); 4459 } 4460 if (!ethdev) 4461 return NULL; 4462 4463 pdev = ethdev->pdev; 4464 if (!pdev) 4465 return NULL; 4466 4467 dev_hold(dev); 4468 pci_dev_get(pdev); 4469 if (pdev->device == PCI_DEVICE_ID_NX2_5709 || 4470 pdev->device == PCI_DEVICE_ID_NX2_5709S) { 4471 u8 rev; 4472 4473 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 4474 if (rev < 0x10) { 4475 pci_dev_put(pdev); 4476 goto cnic_err; 4477 } 4478 } 4479 pci_dev_put(pdev); 4480 4481 cdev = cnic_alloc_dev(dev, pdev); 4482 if (cdev == NULL) 4483 goto cnic_err; 4484 4485 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); 4486 cdev->submit_kwqes = cnic_submit_bnx2_kwqes; 4487 4488 cp = cdev->cnic_priv; 4489 cp->ethdev = ethdev; 4490 cdev->pcidev = pdev; 4491 4492 cp->cnic_ops = &cnic_bnx2_ops; 
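	/* The remaining callbacks bind the generic cnic core to the
	 * bnx2-specific implementations; cnic_start_hw() and
	 * cnic_stop_hw() above dispatch only through this method
	 * table, so one core path serves both the bnx2 and bnx2x
	 * back ends. */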
4493 cp->start_hw = cnic_start_bnx2_hw; 4494 cp->stop_hw = cnic_stop_bnx2_hw; 4495 cp->setup_pgtbl = cnic_setup_page_tbl; 4496 cp->alloc_resc = cnic_alloc_bnx2_resc; 4497 cp->free_resc = cnic_free_resc; 4498 cp->start_cm = cnic_cm_init_bnx2_hw; 4499 cp->stop_cm = cnic_cm_stop_bnx2_hw; 4500 cp->enable_int = cnic_enable_bnx2_int; 4501 cp->disable_int_sync = cnic_disable_bnx2_int_sync; 4502 cp->close_conn = cnic_close_bnx2_conn; 4503 cp->next_idx = cnic_bnx2_next_idx; 4504 cp->hw_idx = cnic_bnx2_hw_idx; 4505 return cdev; 4506 4507cnic_err: 4508 dev_put(dev); 4509 return NULL; 4510} 4511 4512static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) 4513{ 4514 struct pci_dev *pdev; 4515 struct cnic_dev *cdev; 4516 struct cnic_local *cp; 4517 struct cnic_eth_dev *ethdev = NULL; 4518 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 4519 4520 probe = symbol_get(bnx2x_cnic_probe); 4521 if (probe) { 4522 ethdev = (*probe)(dev); 4523 symbol_put(bnx2x_cnic_probe); 4524 } 4525 if (!ethdev) 4526 return NULL; 4527 4528 pdev = ethdev->pdev; 4529 if (!pdev) 4530 return NULL; 4531 4532 dev_hold(dev); 4533 cdev = cnic_alloc_dev(dev, pdev); 4534 if (cdev == NULL) { 4535 dev_put(dev); 4536 return NULL; 4537 } 4538 4539 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); 4540 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; 4541 4542 cp = cdev->cnic_priv; 4543 cp->ethdev = ethdev; 4544 cdev->pcidev = pdev; 4545 4546 cp->cnic_ops = &cnic_bnx2x_ops; 4547 cp->start_hw = cnic_start_bnx2x_hw; 4548 cp->stop_hw = cnic_stop_bnx2x_hw; 4549 cp->setup_pgtbl = cnic_setup_page_tbl_le; 4550 cp->alloc_resc = cnic_alloc_bnx2x_resc; 4551 cp->free_resc = cnic_free_resc; 4552 cp->start_cm = cnic_cm_init_bnx2x_hw; 4553 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 4554 cp->enable_int = cnic_enable_bnx2x_int; 4555 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 4556 cp->ack_int = cnic_ack_bnx2x_msix; 4557 cp->close_conn = cnic_close_bnx2x_conn; 4558 cp->next_idx = cnic_bnx2x_next_idx; 4559 cp->hw_idx = cnic_bnx2x_hw_idx; 4560 return cdev; 4561} 4562 4563static struct cnic_dev *is_cnic_dev(struct net_device *dev) 4564{ 4565 struct ethtool_drvinfo drvinfo; 4566 struct cnic_dev *cdev = NULL; 4567 4568 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { 4569 memset(&drvinfo, 0, sizeof(drvinfo)); 4570 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 4571 4572 if (!strcmp(drvinfo.driver, "bnx2")) 4573 cdev = init_bnx2_cnic(dev); 4574 if (!strcmp(drvinfo.driver, "bnx2x")) 4575 cdev = init_bnx2x_cnic(dev); 4576 if (cdev) { 4577 write_lock(&cnic_dev_lock); 4578 list_add(&cdev->list, &cnic_dev_list); 4579 write_unlock(&cnic_dev_lock); 4580 } 4581 } 4582 return cdev; 4583} 4584 4585/** 4586 * netdev event handler 4587 */ 4588static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 4589 void *ptr) 4590{ 4591 struct net_device *netdev = ptr; 4592 struct cnic_dev *dev; 4593 int if_type; 4594 int new_dev = 0; 4595 4596 dev = cnic_from_netdev(netdev); 4597 4598 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) { 4599 /* Check for the hot-plug device */ 4600 dev = is_cnic_dev(netdev); 4601 if (dev) { 4602 new_dev = 1; 4603 cnic_hold(dev); 4604 } 4605 } 4606 if (dev) { 4607 struct cnic_local *cp = dev->cnic_priv; 4608 4609 if (new_dev) 4610 cnic_ulp_init(dev); 4611 else if (event == NETDEV_UNREGISTER) 4612 cnic_ulp_exit(dev); 4613 4614 if (event == NETDEV_UP) { 4615 if (cnic_register_netdev(dev) != 0) { 4616 cnic_put(dev); 4617 goto done; 4618 } 4619 if (!cnic_start_hw(dev)) 4620 cnic_ulp_start(dev); 4621 } 4622 
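		/* Fan the event out to every registered upper-layer
		 * protocol (ULP) driver. The ulp_ops slots are published
		 * with rcu_assign_pointer() (see cnic_stop_hw() above),
		 * so readers hold rcu_read_lock(), use rcu_dereference(),
		 * and must tolerate NULL entries. */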
4623 rcu_read_lock(); 4624 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 4625 struct cnic_ulp_ops *ulp_ops; 4626 void *ctx; 4627 4628 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 4629 if (!ulp_ops || !ulp_ops->indicate_netevent) 4630 continue; 4631 4632 ctx = cp->ulp_handle[if_type]; 4633 4634 ulp_ops->indicate_netevent(ctx, event); 4635 } 4636 rcu_read_unlock(); 4637 4638 if (event == NETDEV_GOING_DOWN) { 4639 cnic_ulp_stop(dev); 4640 cnic_stop_hw(dev); 4641 cnic_unregister_netdev(dev); 4642 } else if (event == NETDEV_UNREGISTER) { 4643 write_lock(&cnic_dev_lock); 4644 list_del_init(&dev->list); 4645 write_unlock(&cnic_dev_lock); 4646 4647 cnic_put(dev); 4648 cnic_free_dev(dev); 4649 goto done; 4650 } 4651 cnic_put(dev); 4652 } 4653done: 4654 return NOTIFY_DONE; 4655} 4656 4657static struct notifier_block cnic_netdev_notifier = { 4658 .notifier_call = cnic_netdev_event 4659}; 4660 4661static void cnic_release(void) 4662{ 4663 struct cnic_dev *dev; 4664 4665 while (!list_empty(&cnic_dev_list)) { 4666 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); 4667 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4668 cnic_ulp_stop(dev); 4669 cnic_stop_hw(dev); 4670 } 4671 4672 cnic_ulp_exit(dev); 4673 cnic_unregister_netdev(dev); 4674 list_del_init(&dev->list); 4675 cnic_free_dev(dev); 4676 } 4677} 4678 4679static int __init cnic_init(void) 4680{ 4681 int rc = 0; 4682 4683 pr_info("%s", version); 4684 4685 rc = register_netdevice_notifier(&cnic_netdev_notifier); 4686 if (rc) { 4687 cnic_release(); 4688 return rc; 4689 } 4690 4691 return 0; 4692} 4693 4694static void __exit cnic_exit(void) 4695{ 4696 unregister_netdevice_notifier(&cnic_netdev_notifier); 4697 cnic_release(); 4698} 4699 4700module_init(cnic_init); 4701module_exit(cnic_exit);
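
/*
 * Editor's sketch (not part of cnic): cnic_init() above discovers
 * bnx2/bnx2x devices purely through the netdevice notifier chain.
 * The minimal, self-contained module below shows that pattern in
 * isolation under the pre-3.11 notifier ABI this file uses (the
 * callback's ptr argument is the struct net_device itself). Every
 * "demo_" name is hypothetical; the block is kept inside #if 0 so
 * it cannot affect this translation unit.
 */
#if 0
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *netdev = ptr;

	switch (event) {
	case NETDEV_UP:
		pr_info("demo: %s is up\n", netdev->name);
		break;
	case NETDEV_GOING_DOWN:
		pr_info("demo: %s is going down\n", netdev->name);
		break;
	}
	/* Like cnic_netdev_event(), never veto the event. */
	return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_notifier = {
	.notifier_call = demo_netdev_event
};

static int __init demo_init(void)
{
	/* Registration also replays NETDEV_REGISTER/NETDEV_UP for
	 * devices that already exist, which is how cnic finds NICs
	 * brought up before the module loaded. */
	return register_netdevice_notifier(&demo_netdev_notifier);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_netdev_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
#endif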