Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

cnic.c at v2.6.32-rc7 (2825 lines, 68 kB)
/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"
#define PFX DRV_MODULE_NAME	": "

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

static LIST_HEAD(cnic_dev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

static int cnic_service_bnx2(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cp->uio_dev != -1)
		return -EBUSY;

	cp->uio_dev = iminor(inode);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_dev *dev = uinfo->priv;
	struct cnic_local *cp = dev->cnic_priv;

	cnic_shutdown_bnx2_rx_ring(dev);

	cp->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = DRV_CTL_COMPLETION_CMD;
	info.data.comp.comp_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;

	if (cp->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	rcu_read_lock();
	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
	if (ulp_ops)
		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
	rcu_read_unlock();
	return 0;
}

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk)) {
			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));
			if (is_valid_ether_addr(csk->ha))
				cnic_cm_set_pg(csk);
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

static void cnic_uio_stop(void)
{
	struct cnic_dev *dev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (cp->cnic_uinfo)
			cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	}
	read_unlock(&cnic_dev_lock);
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type]) {
		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
		       "been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	read_unlock(&cnic_dev_lock);
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl[ulp_type];
	if (!ulp_ops) {
		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
		       "been registered\n", ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
			       "still has devices registered\n", ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_uio_stop();

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
		       " to zero.\n", dev->netdev->name);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl[ulp_type] == NULL) {
		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
		       "has not been registered\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
		       "been registered to this device\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl[ulp_type];
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
		       ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		printk(KERN_ERR PFX "cnic_unregister_device: device not "
		       "registered to this ulp type %d\n", ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
		       " to complete.\n", dev->netdev->name);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = 0;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
					    dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
				    dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	u32 *page_table = dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = (u32) dma->pg_map_arr[i];
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
						      BCM_PAGE_SIZE,
						      &dma->pg_map_arr[i]);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
					  &dma->pgtbl_map);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (cp->cnic_uinfo) {
		while (cp->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		uio_unregister_device(cp->cnic_uinfo);
		kfree(cp->cnic_uinfo);
		cp->cnic_uinfo = NULL;
	}

	if (cp->l2_buf) {
		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
				    cp->l2_buf, cp->l2_buf_map);
		cp->l2_buf = NULL;
	}

	if (cp->l2_ring) {
		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
				    cp->l2_ring, cp->l2_ring_map);
		cp->l2_ring = NULL;
	}

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
					    cp->ctx_arr[i].ctx,
					    cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kcq_info);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
						     &cp->ctx_arr[i].mapping);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->l2_ring_size = pages * BCM_PAGE_SIZE;
	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
					   &cp->l2_ring_map);
	if (!cp->l2_ring)
		return -ENOMEM;

	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
					  &cp->l2_buf_map);
	if (!cp->l2_buf)
		return -ENOMEM;

	return 0;
}

static int cnic_alloc_uio(struct cnic_dev *dev) {
	struct cnic_local *cp = dev->cnic_priv;
	struct uio_info *uinfo;
	int ret;

	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
	if (!uinfo)
		return -ENOMEM;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
	uinfo->mem[2].size = cp->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
	uinfo->mem[3].size = cp->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	uinfo->priv = dev;

	ret = uio_register_device(&dev->pcidev->dev, uinfo);
	if (ret) {
		kfree(uinfo);
		return ret;
	}

	cp->cnic_uinfo = uinfo;
	return 0;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_l2_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_alloc_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			cnic_kwq_completion(dev, 1);

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				cnic_kwq_completion(dev, 1);
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
			       dev->netdev->name, kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	return;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = *sw_prod;
	ri &= MAX_KCQ_IDX;

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = cp->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	*sw_prod = last;
	return last_cnt;
}

static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons = *cp->rx_cons_ptr;
	u16 tx_cons = *cp->tx_cons_ptr;

	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;
		uio_event_notify(cp->cnic_uinfo);
	}
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct status_block *sblk = status_blk;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = sblk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return status_idx;

	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	hw_prod = sblk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != sblk->status_idx) {
			status_idx = sblk->status_idx;
			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
			hw_prod = sblk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);

	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);
	return status_idx;
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	struct status_block_msix *status_blk = cp->bnx2_status_blk;
	u32 status_idx = status_blk->status_idx;
	u16 hw_prod, sw_prod;
	int kcqe_cnt;

	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;

	hw_prod = status_blk->status_completion_producer_index;
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		if (status_idx != status_blk->status_idx) {
			status_idx = status_blk->status_idx;
			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
			hw_prod = status_blk->status_completion_producer_index;
		} else
			break;
	}

done:
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
	cp->kcq_prod_idx = sw_prod;

	cnic_chk_bnx2_pkt_rings(cp);

	cp->last_status_idx = status_idx;
	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	if (cp->ack_int)
		cp->ack_int(dev);

	prefetch(cp->status_blk);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	return IRQ_HANDLED;
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	if (cp->cnic_uinfo)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cp->ulp_ops[if_type];
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl[i];
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}

static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_count = DEF_IPID_COUNT;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}

static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}

static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct flowi fl;
	int err;
	struct rtable *rt;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;

	err = ip_route_output_key(&init_net, &rt, &fl);
	if (!err)
		*dst = &rt->u.dst;
	return err;
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
		fl.oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl);
	if (*dst)
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, err, rc = -ENETUNREACH;
	struct dst_entry *dst;
	struct net_device *realdev;
	u32 local_port;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
		set_bit(SK_F_IPV6, &csk->flags);
		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;
#else
		return rc;
#endif

	} else {
		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
		if (err)
			return err;

		if (!dst || dst->error || !dst->dev)
			goto err_out;

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
	if (realdev != dev->netdev)
		goto err_out;

	if (local_port >= CNIC_LOCAL_PORT_MIN &&
	    local_port < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
			local_port = 0;
	} else
		local_port = 0;

	if (!local_port) {
		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (local_port == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
	}
	csk->src_port = local_port;

	csk->mtu = dst_mtu(dst);
	rc = 0;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	int err = 0;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
		opcode = csk->state;
	else
		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
	cp->close_conn(csk, opcode);

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}

static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
		if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
			csk->state = opcode;
		/* fall through */
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if ((opcode == csk->state) ||
	    (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
	     csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
			return 1;
	}
	return 0;
}

static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	if (cnic_ready_to_close(csk, opcode)) {
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, opcode);
	}
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}

static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	cp->stop_cm(dev);

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}

static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid_addr;
	int i;

	if (CHIP_NUM(cp) == CHIP_NUM_5709)
		return;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_disable(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int err, i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->bnx2_status_blk = cp->status_blk;
		cp->last_status_idx = cp->bnx2_status_blk->status_idx;
		tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
				  "cnic", dev);
		if (err) {
			tasklet_disable(&cp->cnic_irq_task);
			return err;
		}
		while (cp->bnx2_status_blk->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->bnx2_status_blk->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n",
	       dev->netdev->name);
	return -EBUSY;
}

static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}

static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map;
	struct status_block *s_blk = cp->status_blk;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cnic_init_context(dev, tx_cid);
	cnic_init_context(dev, tx_cid + 1);
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk;

		tx_cid = TX_TSS_CID + sb_id - 1;
		cnic_init_context(dev, tx_cid);
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = (struct tx_bd *) cp->l2_ring;

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) cp->l2_ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
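/* Illustrative sketch, not part of the driver: the BD address writes in
 * cnic_init_bnx2_tx_ring() above split a dma_addr_t into 32-bit halves
 * with open-coded shifts and masks; the upper_32_bits()/lower_32_bits()
 * helpers from <linux/kernel.h> express the same split:
 */
static inline void cnic_example_set_tx_bd_addr(struct tx_bd *txbd,
					       dma_addr_t map)
{
	txbd->tx_bd_haddr_hi = upper_32_bits(map);
	txbd->tx_bd_haddr_lo = lower_32_bits(map);
}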
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (*cp->rx_cons_ptr == 0 && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}
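/* Illustrative sketch, not part of the driver: cnic_set_bnx2_mac() above
 * unpacks the iSCSI MAC from two shared-memory words, two bytes from the
 * "upper" word and four from the "lower" word, most significant byte
 * first.  A hypothetical helper for the lower word:
 */
static void cnic_example_mac_lower(u8 *mac, u32 lower)
{
	mac[2] = (u8) (lower >> 24);	/* most significant byte first */
	mac[3] = (u8) (lower >> 16);
	mac[4] = (u8) (lower >> 8);
	mac[5] = (u8) lower;
}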
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk;
	u32 val;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq_prod_idx = 0;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq_info.pgtbl_map;
	cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: cnic_init_bnx2_irq failed\n",
		       dev->netdev->name);
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}
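/* Illustrative sketch, not part of the driver: cnic_start_bnx2_hw() above
 * programs the same hi/lo page-table address pair for both the KWQ and the
 * KCQ contexts.  A hypothetical helper factoring out that repetition:
 */
static void cnic_example_wr_pgtbl_addr(struct cnic_dev *dev, u32 cid_addr,
				       dma_addr_t pgtbl_map)
{
	cnic_ctx_wr(dev, cid_addr, L5_KRNLQ_PGTBL_HADDR_HI,
		    (u32) ((u64) pgtbl_map >> 32));
	cnic_ctx_wr(dev, cid_addr, L5_KRNLQ_PGTBL_HADDR_LO,
		    (u32) pgtbl_map);
}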
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		printk(KERN_ERR PFX "%s: register_cnic failed\n",
		       dev->netdev->name);

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	cp->chip_id = ethdev->chip_id;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		printk(KERN_ERR PFX "%s: allocate resource failure\n",
		       dev->netdev->name);
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
		       " to zero.\n", dev->netdev->name);

	printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
	dev_put(dev->netdev);
	kfree(dev);
}
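/* Illustrative sketch, not part of the driver: cnic_free_dev() above polls
 * the reference count for up to roughly one second before giving up.  The
 * same bounded-poll pattern as a hypothetical generic helper:
 */
static int cnic_example_wait_for_refs(atomic_t *refs, int max_tries)
{
	int i;

	for (i = 0; i < max_tries && atomic_read(refs) != 0; i++)
		msleep(100);		/* sleep, do not busy-wait */

	return atomic_read(refs) == 0 ? 0 : -EBUSY;
}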
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
		       dev->name);
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->uio_dev = -1;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	    pdev->device == PCI_DEVICE_ID_NX2_5709S) {
		u8 rev;

		pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
		if (rev < 0x10) {
			pci_dev_put(pdev);
			goto cnic_err;
		}
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}
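/* Illustrative sketch, not part of the driver: is_cnic_dev() above keys
 * off the ethtool driver name to recognize bnx2 devices.  A hypothetical
 * generic form of the same test:
 */
static int cnic_example_netdev_driven_by(struct net_device *dev,
					 const char *name)
{
	struct ethtool_drvinfo drvinfo;

	if (!dev->ethtool_ops || !dev->ethtool_ops->get_drvinfo)
		return 0;

	memset(&drvinfo, 0, sizeof(drvinfo));
	dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
	return strcmp(drvinfo.driver, name) == 0;
}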
/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	printk(KERN_INFO "%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
}

module_init(cnic_init);
module_exit(cnic_exit);