Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.39-rc3 3802 lines 92 kB view raw
1/* 2 * linux/drivers/net/ehea/ehea_main.c 3 * 4 * eHEA ethernet device driver for IBM eServer System p 5 * 6 * (C) Copyright IBM Corp. 2006 7 * 8 * Authors: 9 * Christoph Raisch <raisch@de.ibm.com> 10 * Jan-Bernd Themann <themann@de.ibm.com> 11 * Thomas Klein <tklein@de.ibm.com> 12 * 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2, or (at your option) 17 * any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 */ 28 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31#include <linux/in.h> 32#include <linux/ip.h> 33#include <linux/tcp.h> 34#include <linux/udp.h> 35#include <linux/if.h> 36#include <linux/list.h> 37#include <linux/slab.h> 38#include <linux/if_ether.h> 39#include <linux/notifier.h> 40#include <linux/reboot.h> 41#include <linux/memory.h> 42#include <asm/kexec.h> 43#include <linux/mutex.h> 44 45#include <net/ip.h> 46 47#include "ehea.h" 48#include "ehea_qmr.h" 49#include "ehea_phyp.h" 50 51 52MODULE_LICENSE("GPL"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 54MODULE_DESCRIPTION("IBM eServer HEA Driver"); 55MODULE_VERSION(DRV_VERSION); 56 57 58static int msg_level = -1; 59static int rq1_entries = EHEA_DEF_ENTRIES_RQ1; 60static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; 61static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; 62static int sq_entries = EHEA_DEF_ENTRIES_SQ; 63static int use_mcs; 64static int use_lro; 65static int lro_max_aggr = EHEA_LRO_MAX_AGGR; 66static int num_tx_qps = EHEA_NUM_TX_QP; 67static int prop_carrier_state; 68 69module_param(msg_level, int, 0); 70module_param(rq1_entries, int, 0); 71module_param(rq2_entries, int, 0); 72module_param(rq3_entries, int, 0); 73module_param(sq_entries, int, 0); 74module_param(prop_carrier_state, int, 0); 75module_param(use_mcs, int, 0); 76module_param(use_lro, int, 0); 77module_param(lro_max_aggr, int, 0); 78module_param(num_tx_qps, int, 0); 79 80MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS"); 81MODULE_PARM_DESC(msg_level, "msg_level"); 82MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical " 83 "port to stack. 1:yes, 0:no. Default = 0 "); 84MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " 85 "[2^x - 1], x = [6..14]. Default = " 86 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")"); 87MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 " 88 "[2^x - 1], x = [6..14]. Default = " 89 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")"); 90MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 " 91 "[2^x - 1], x = [6..14]. Default = " 92 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")"); 93MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " 94 "[2^x - 1], x = [6..14]. Default = " 95 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); 96MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 "); 97 98MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = " 99 __MODULE_STRING(EHEA_LRO_MAX_AGGR)); 100MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " 101 "Default = 0"); 102 103static int port_name_cnt; 104static LIST_HEAD(adapter_list); 105static unsigned long ehea_driver_flags; 106static DEFINE_MUTEX(dlpar_mem_lock); 107struct ehea_fw_handle_array ehea_fw_handles; 108struct ehea_bcmc_reg_array ehea_bcmc_regs; 109 110 111static int __devinit ehea_probe_adapter(struct platform_device *dev, 112 const struct of_device_id *id); 113 114static int __devexit ehea_remove(struct platform_device *dev); 115 116static struct of_device_id ehea_device_table[] = { 117 { 118 .name = "lhea", 119 .compatible = "IBM,lhea", 120 }, 121 {}, 122}; 123MODULE_DEVICE_TABLE(of, ehea_device_table); 124 125static struct of_platform_driver ehea_driver = { 126 .driver = { 127 .name = "ehea", 128 .owner = THIS_MODULE, 129 .of_match_table = ehea_device_table, 130 }, 131 .probe = ehea_probe_adapter, 132 .remove = ehea_remove, 133}; 134 135void ehea_dump(void *adr, int len, char *msg) 136{ 137 int x; 138 unsigned char *deb = adr; 139 for (x = 0; x < len; x += 16) { 140 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n", 141 msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); 142 deb += 16; 143 } 144} 145 146void ehea_schedule_port_reset(struct ehea_port *port) 147{ 148 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) 149 schedule_work(&port->reset_task); 150} 151 152static void ehea_update_firmware_handles(void) 153{ 154 struct ehea_fw_handle_entry *arr = NULL; 155 struct ehea_adapter *adapter; 156 int num_adapters = 0; 157 int num_ports = 0; 158 int num_portres = 0; 159 int i = 0; 160 int num_fw_handles, k, l; 161 162 /* Determine number of handles */ 163 mutex_lock(&ehea_fw_handles.lock); 164 165 list_for_each_entry(adapter, &adapter_list, list) { 166 num_adapters++; 167 168 for (k = 0; k < EHEA_MAX_PORTS; k++) { 169 struct ehea_port *port = adapter->port[k]; 170 171 if (!port || (port->state != EHEA_PORT_UP)) 172 continue; 173 174 num_ports++; 175 num_portres += port->num_def_qps + port->num_add_tx_qps; 176 } 177 } 178 179 num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES + 180 num_ports * EHEA_NUM_PORT_FW_HANDLES + 181 num_portres * EHEA_NUM_PORTRES_FW_HANDLES; 182 183 if (num_fw_handles) { 184 arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL); 185 if (!arr) 186 goto out; /* Keep the existing array */ 187 } else 188 goto out_update; 189 190 list_for_each_entry(adapter, &adapter_list, list) { 191 if (num_adapters == 0) 192 break; 193 194 for (k = 0; k < EHEA_MAX_PORTS; k++) { 195 struct ehea_port *port = adapter->port[k]; 196 197 if (!port || (port->state != EHEA_PORT_UP) || 198 (num_ports == 0)) 199 continue; 200 201 for (l = 0; 202 l < port->num_def_qps + port->num_add_tx_qps; 203 l++) { 204 struct ehea_port_res *pr = &port->port_res[l]; 205 206 arr[i].adh = adapter->handle; 207 arr[i++].fwh = pr->qp->fw_handle; 208 arr[i].adh = adapter->handle; 209 arr[i++].fwh = pr->send_cq->fw_handle; 210 arr[i].adh = adapter->handle; 211 arr[i++].fwh = pr->recv_cq->fw_handle; 212 arr[i].adh = adapter->handle; 213 arr[i++].fwh = pr->eq->fw_handle; 214 arr[i].adh = adapter->handle; 215 arr[i++].fwh = pr->send_mr.handle; 216 arr[i].adh = adapter->handle; 217 arr[i++].fwh = pr->recv_mr.handle; 218 } 219 arr[i].adh = adapter->handle; 220 arr[i++].fwh = port->qp_eq->fw_handle; 221 num_ports--; 222 } 223 224 arr[i].adh = adapter->handle; 225 arr[i++].fwh = adapter->neq->fw_handle; 226 227 if (adapter->mr.handle) { 228 arr[i].adh = adapter->handle; 229 arr[i++].fwh = adapter->mr.handle; 230 } 231 num_adapters--; 232 } 233 234out_update: 235 kfree(ehea_fw_handles.arr); 236 ehea_fw_handles.arr = arr; 237 ehea_fw_handles.num_entries = i; 238out: 239 mutex_unlock(&ehea_fw_handles.lock); 240} 241 242static void ehea_update_bcmc_registrations(void) 243{ 244 unsigned long flags; 245 struct ehea_bcmc_reg_entry *arr = NULL; 246 struct ehea_adapter *adapter; 247 struct ehea_mc_list *mc_entry; 248 int num_registrations = 0; 249 int i = 0; 250 int k; 251 252 spin_lock_irqsave(&ehea_bcmc_regs.lock, flags); 253 254 /* Determine number of registrations */ 255 list_for_each_entry(adapter, &adapter_list, list) 256 for (k = 0; k < EHEA_MAX_PORTS; k++) { 257 struct ehea_port *port = adapter->port[k]; 258 259 if (!port || (port->state != EHEA_PORT_UP)) 260 continue; 261 262 num_registrations += 2; /* Broadcast registrations */ 263 264 list_for_each_entry(mc_entry, &port->mc_list->list,list) 265 num_registrations += 2; 266 } 267 268 if (num_registrations) { 269 arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC); 270 if (!arr) 271 goto out; /* Keep the existing array */ 272 } else 273 goto out_update; 274 275 list_for_each_entry(adapter, &adapter_list, list) { 276 for (k = 0; k < EHEA_MAX_PORTS; k++) { 277 struct ehea_port *port = adapter->port[k]; 278 279 if (!port || (port->state != EHEA_PORT_UP)) 280 continue; 281 282 if (num_registrations == 0) 283 goto out_update; 284 285 arr[i].adh = adapter->handle; 286 arr[i].port_id = port->logical_port_id; 287 arr[i].reg_type = EHEA_BCMC_BROADCAST | 288 EHEA_BCMC_UNTAGGED; 289 arr[i++].macaddr = port->mac_addr; 290 291 arr[i].adh = adapter->handle; 292 arr[i].port_id = port->logical_port_id; 293 arr[i].reg_type = EHEA_BCMC_BROADCAST | 294 EHEA_BCMC_VLANID_ALL; 295 arr[i++].macaddr = port->mac_addr; 296 num_registrations -= 2; 297 298 list_for_each_entry(mc_entry, 299 &port->mc_list->list, list) { 300 if (num_registrations == 0) 301 goto out_update; 302 303 arr[i].adh = adapter->handle; 304 arr[i].port_id = port->logical_port_id; 305 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 306 EHEA_BCMC_MULTICAST | 307 EHEA_BCMC_UNTAGGED; 308 arr[i++].macaddr = mc_entry->macaddr; 309 310 arr[i].adh = adapter->handle; 311 arr[i].port_id = port->logical_port_id; 312 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 313 EHEA_BCMC_MULTICAST | 314 EHEA_BCMC_VLANID_ALL; 315 arr[i++].macaddr = mc_entry->macaddr; 316 num_registrations -= 2; 317 } 318 } 319 } 320 321out_update: 322 kfree(ehea_bcmc_regs.arr); 323 ehea_bcmc_regs.arr = arr; 324 ehea_bcmc_regs.num_entries = i; 325out: 326 spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags); 327} 328 329static struct net_device_stats *ehea_get_stats(struct net_device *dev) 330{ 331 struct ehea_port *port = netdev_priv(dev); 332 struct net_device_stats *stats = &port->stats; 333 struct hcp_ehea_port_cb2 *cb2; 334 u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0; 335 int i; 336 337 memset(stats, 0, sizeof(*stats)); 338 339 cb2 = (void *)get_zeroed_page(GFP_KERNEL); 340 if (!cb2) { 341 netdev_err(dev, "no mem for cb2\n"); 342 goto out; 343 } 344 345 hret = ehea_h_query_ehea_port(port->adapter->handle, 346 port->logical_port_id, 347 H_PORT_CB2, H_PORT_CB2_ALL, cb2); 348 if (hret != H_SUCCESS) { 349 netdev_err(dev, "query_ehea_port failed\n"); 350 goto out_herr; 351 } 352 353 if (netif_msg_hw(port)) 354 ehea_dump(cb2, sizeof(*cb2), "net_device_stats"); 355 356 rx_packets = 0; 357 for (i = 0; i < port->num_def_qps; i++) { 358 rx_packets += port->port_res[i].rx_packets; 359 rx_bytes += port->port_res[i].rx_bytes; 360 } 361 362 tx_packets = 0; 363 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 364 tx_packets += port->port_res[i].tx_packets; 365 tx_bytes += port->port_res[i].tx_bytes; 366 } 367 368 stats->tx_packets = tx_packets; 369 stats->multicast = cb2->rxmcp; 370 stats->rx_errors = cb2->rxuerr; 371 stats->rx_bytes = rx_bytes; 372 stats->tx_bytes = tx_bytes; 373 stats->rx_packets = rx_packets; 374 375out_herr: 376 free_page((unsigned long)cb2); 377out: 378 return stats; 379} 380 381static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) 382{ 383 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 384 struct net_device *dev = pr->port->netdev; 385 int max_index_mask = pr->rq1_skba.len - 1; 386 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; 387 int adder = 0; 388 int i; 389 390 pr->rq1_skba.os_skbs = 0; 391 392 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 393 if (nr_of_wqes > 0) 394 pr->rq1_skba.index = index; 395 pr->rq1_skba.os_skbs = fill_wqes; 396 return; 397 } 398 399 for (i = 0; i < fill_wqes; i++) { 400 if (!skb_arr_rq1[index]) { 401 skb_arr_rq1[index] = netdev_alloc_skb(dev, 402 EHEA_L_PKT_SIZE); 403 if (!skb_arr_rq1[index]) { 404 netdev_info(dev, "Unable to allocate enough skb in the array\n"); 405 pr->rq1_skba.os_skbs = fill_wqes - i; 406 break; 407 } 408 } 409 index--; 410 index &= max_index_mask; 411 adder++; 412 } 413 414 if (adder == 0) 415 return; 416 417 /* Ring doorbell */ 418 ehea_update_rq1a(pr->qp, adder); 419} 420 421static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) 422{ 423 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 424 struct net_device *dev = pr->port->netdev; 425 int i; 426 427 if (nr_rq1a > pr->rq1_skba.len) { 428 netdev_err(dev, "NR_RQ1A bigger than skb array len\n"); 429 return; 430 } 431 432 for (i = 0; i < nr_rq1a; i++) { 433 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 434 if (!skb_arr_rq1[i]) { 435 netdev_info(dev, "Not enough memory to allocate skb array\n"); 436 break; 437 } 438 } 439 /* Ring doorbell */ 440 ehea_update_rq1a(pr->qp, i - 1); 441} 442 443static int ehea_refill_rq_def(struct ehea_port_res *pr, 444 struct ehea_q_skb_arr *q_skba, int rq_nr, 445 int num_wqes, int wqe_type, int packet_size) 446{ 447 struct net_device *dev = pr->port->netdev; 448 struct ehea_qp *qp = pr->qp; 449 struct sk_buff **skb_arr = q_skba->arr; 450 struct ehea_rwqe *rwqe; 451 int i, index, max_index_mask, fill_wqes; 452 int adder = 0; 453 int ret = 0; 454 455 fill_wqes = q_skba->os_skbs + num_wqes; 456 q_skba->os_skbs = 0; 457 458 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 459 q_skba->os_skbs = fill_wqes; 460 return ret; 461 } 462 463 index = q_skba->index; 464 max_index_mask = q_skba->len - 1; 465 for (i = 0; i < fill_wqes; i++) { 466 u64 tmp_addr; 467 struct sk_buff *skb; 468 469 skb = netdev_alloc_skb_ip_align(dev, packet_size); 470 if (!skb) { 471 q_skba->os_skbs = fill_wqes - i; 472 if (q_skba->os_skbs == q_skba->len - 2) { 473 netdev_info(pr->port->netdev, 474 "rq%i ran dry - no mem for skb\n", 475 rq_nr); 476 ret = -ENOMEM; 477 } 478 break; 479 } 480 481 skb_arr[index] = skb; 482 tmp_addr = ehea_map_vaddr(skb->data); 483 if (tmp_addr == -1) { 484 dev_kfree_skb(skb); 485 q_skba->os_skbs = fill_wqes - i; 486 ret = 0; 487 break; 488 } 489 490 rwqe = ehea_get_next_rwqe(qp, rq_nr); 491 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) 492 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); 493 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; 494 rwqe->sg_list[0].vaddr = tmp_addr; 495 rwqe->sg_list[0].len = packet_size; 496 rwqe->data_segments = 1; 497 498 index++; 499 index &= max_index_mask; 500 adder++; 501 } 502 503 q_skba->index = index; 504 if (adder == 0) 505 goto out; 506 507 /* Ring doorbell */ 508 iosync(); 509 if (rq_nr == 2) 510 ehea_update_rq2a(pr->qp, adder); 511 else 512 ehea_update_rq3a(pr->qp, adder); 513out: 514 return ret; 515} 516 517 518static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) 519{ 520 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, 521 nr_of_wqes, EHEA_RWQE2_TYPE, 522 EHEA_RQ2_PKT_SIZE); 523} 524 525 526static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) 527{ 528 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, 529 nr_of_wqes, EHEA_RWQE3_TYPE, 530 EHEA_MAX_PACKET_SIZE); 531} 532 533static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) 534{ 535 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; 536 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) 537 return 0; 538 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && 539 (cqe->header_length == 0)) 540 return 0; 541 return -EINVAL; 542} 543 544static inline void ehea_fill_skb(struct net_device *dev, 545 struct sk_buff *skb, struct ehea_cqe *cqe) 546{ 547 int length = cqe->num_bytes_transfered - 4; /*remove CRC */ 548 549 skb_put(skb, length); 550 skb->protocol = eth_type_trans(skb, dev); 551 552 /* The packet was not an IPV4 packet so a complemented checksum was 553 calculated. The value is found in the Internet Checksum field. */ 554 if (cqe->status & EHEA_CQE_BLIND_CKSUM) { 555 skb->ip_summed = CHECKSUM_COMPLETE; 556 skb->csum = csum_unfold(~cqe->inet_checksum_value); 557 } else 558 skb->ip_summed = CHECKSUM_UNNECESSARY; 559} 560 561static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, 562 int arr_len, 563 struct ehea_cqe *cqe) 564{ 565 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); 566 struct sk_buff *skb; 567 void *pref; 568 int x; 569 570 x = skb_index + 1; 571 x &= (arr_len - 1); 572 573 pref = skb_array[x]; 574 if (pref) { 575 prefetchw(pref); 576 prefetchw(pref + EHEA_CACHE_LINE); 577 578 pref = (skb_array[x]->data); 579 prefetch(pref); 580 prefetch(pref + EHEA_CACHE_LINE); 581 prefetch(pref + EHEA_CACHE_LINE * 2); 582 prefetch(pref + EHEA_CACHE_LINE * 3); 583 } 584 585 skb = skb_array[skb_index]; 586 skb_array[skb_index] = NULL; 587 return skb; 588} 589 590static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array, 591 int arr_len, int wqe_index) 592{ 593 struct sk_buff *skb; 594 void *pref; 595 int x; 596 597 x = wqe_index + 1; 598 x &= (arr_len - 1); 599 600 pref = skb_array[x]; 601 if (pref) { 602 prefetchw(pref); 603 prefetchw(pref + EHEA_CACHE_LINE); 604 605 pref = (skb_array[x]->data); 606 prefetchw(pref); 607 prefetchw(pref + EHEA_CACHE_LINE); 608 } 609 610 skb = skb_array[wqe_index]; 611 skb_array[wqe_index] = NULL; 612 return skb; 613} 614 615static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, 616 struct ehea_cqe *cqe, int *processed_rq2, 617 int *processed_rq3) 618{ 619 struct sk_buff *skb; 620 621 if (cqe->status & EHEA_CQE_STAT_ERR_TCP) 622 pr->p_stats.err_tcp_cksum++; 623 if (cqe->status & EHEA_CQE_STAT_ERR_IP) 624 pr->p_stats.err_ip_cksum++; 625 if (cqe->status & EHEA_CQE_STAT_ERR_CRC) 626 pr->p_stats.err_frame_crc++; 627 628 if (rq == 2) { 629 *processed_rq2 += 1; 630 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); 631 dev_kfree_skb(skb); 632 } else if (rq == 3) { 633 *processed_rq3 += 1; 634 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); 635 dev_kfree_skb(skb); 636 } 637 638 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { 639 if (netif_msg_rx_err(pr->port)) { 640 pr_err("Critical receive error for QP %d. Resetting port.\n", 641 pr->qp->init_attr.qp_nr); 642 ehea_dump(cqe, sizeof(*cqe), "CQE"); 643 } 644 ehea_schedule_port_reset(pr->port); 645 return 1; 646 } 647 648 return 0; 649} 650 651static int get_skb_hdr(struct sk_buff *skb, void **iphdr, 652 void **tcph, u64 *hdr_flags, void *priv) 653{ 654 struct ehea_cqe *cqe = priv; 655 unsigned int ip_len; 656 struct iphdr *iph; 657 658 /* non tcp/udp packets */ 659 if (!cqe->header_length) 660 return -1; 661 662 /* non tcp packet */ 663 skb_reset_network_header(skb); 664 iph = ip_hdr(skb); 665 if (iph->protocol != IPPROTO_TCP) 666 return -1; 667 668 ip_len = ip_hdrlen(skb); 669 skb_set_transport_header(skb, ip_len); 670 *tcph = tcp_hdr(skb); 671 672 /* check if ip header and tcp header are complete */ 673 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) 674 return -1; 675 676 *hdr_flags = LRO_IPV4 | LRO_TCP; 677 *iphdr = iph; 678 679 return 0; 680} 681 682static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe, 683 struct sk_buff *skb) 684{ 685 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) && 686 pr->port->vgrp); 687 688 if (skb->dev->features & NETIF_F_LRO) { 689 if (vlan_extracted) 690 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, 691 pr->port->vgrp, 692 cqe->vlan_tag, 693 cqe); 694 else 695 lro_receive_skb(&pr->lro_mgr, skb, cqe); 696 } else { 697 if (vlan_extracted) 698 vlan_hwaccel_receive_skb(skb, pr->port->vgrp, 699 cqe->vlan_tag); 700 else 701 netif_receive_skb(skb); 702 } 703} 704 705static int ehea_proc_rwqes(struct net_device *dev, 706 struct ehea_port_res *pr, 707 int budget) 708{ 709 struct ehea_port *port = pr->port; 710 struct ehea_qp *qp = pr->qp; 711 struct ehea_cqe *cqe; 712 struct sk_buff *skb; 713 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 714 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; 715 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; 716 int skb_arr_rq1_len = pr->rq1_skba.len; 717 int skb_arr_rq2_len = pr->rq2_skba.len; 718 int skb_arr_rq3_len = pr->rq3_skba.len; 719 int processed, processed_rq1, processed_rq2, processed_rq3; 720 u64 processed_bytes = 0; 721 int wqe_index, last_wqe_index, rq, port_reset; 722 723 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; 724 last_wqe_index = 0; 725 726 cqe = ehea_poll_rq1(qp, &wqe_index); 727 while ((processed < budget) && cqe) { 728 ehea_inc_rq1(qp); 729 processed_rq1++; 730 processed++; 731 if (netif_msg_rx_status(port)) 732 ehea_dump(cqe, sizeof(*cqe), "CQE"); 733 734 last_wqe_index = wqe_index; 735 rmb(); 736 if (!ehea_check_cqe(cqe, &rq)) { 737 if (rq == 1) { 738 /* LL RQ1 */ 739 skb = get_skb_by_index_ll(skb_arr_rq1, 740 skb_arr_rq1_len, 741 wqe_index); 742 if (unlikely(!skb)) { 743 netif_info(port, rx_err, dev, 744 "LL rq1: skb=NULL\n"); 745 746 skb = netdev_alloc_skb(dev, 747 EHEA_L_PKT_SIZE); 748 if (!skb) { 749 netdev_err(dev, "Not enough memory to allocate skb\n"); 750 break; 751 } 752 } 753 skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 754 cqe->num_bytes_transfered - 4); 755 ehea_fill_skb(dev, skb, cqe); 756 } else if (rq == 2) { 757 /* RQ2 */ 758 skb = get_skb_by_index(skb_arr_rq2, 759 skb_arr_rq2_len, cqe); 760 if (unlikely(!skb)) { 761 netif_err(port, rx_err, dev, 762 "rq2: skb=NULL\n"); 763 break; 764 } 765 ehea_fill_skb(dev, skb, cqe); 766 processed_rq2++; 767 } else { 768 /* RQ3 */ 769 skb = get_skb_by_index(skb_arr_rq3, 770 skb_arr_rq3_len, cqe); 771 if (unlikely(!skb)) { 772 netif_err(port, rx_err, dev, 773 "rq3: skb=NULL\n"); 774 break; 775 } 776 ehea_fill_skb(dev, skb, cqe); 777 processed_rq3++; 778 } 779 780 processed_bytes += skb->len; 781 ehea_proc_skb(pr, cqe, skb); 782 } else { 783 pr->p_stats.poll_receive_errors++; 784 port_reset = ehea_treat_poll_error(pr, rq, cqe, 785 &processed_rq2, 786 &processed_rq3); 787 if (port_reset) 788 break; 789 } 790 cqe = ehea_poll_rq1(qp, &wqe_index); 791 } 792 if (dev->features & NETIF_F_LRO) 793 lro_flush_all(&pr->lro_mgr); 794 795 pr->rx_packets += processed; 796 pr->rx_bytes += processed_bytes; 797 798 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); 799 ehea_refill_rq2(pr, processed_rq2); 800 ehea_refill_rq3(pr, processed_rq3); 801 802 return processed; 803} 804 805#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull 806 807static void reset_sq_restart_flag(struct ehea_port *port) 808{ 809 int i; 810 811 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 812 struct ehea_port_res *pr = &port->port_res[i]; 813 pr->sq_restart_flag = 0; 814 } 815 wake_up(&port->restart_wq); 816} 817 818static void check_sqs(struct ehea_port *port) 819{ 820 struct ehea_swqe *swqe; 821 int swqe_index; 822 int i, k; 823 824 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 825 struct ehea_port_res *pr = &port->port_res[i]; 826 int ret; 827 k = 0; 828 swqe = ehea_get_swqe(pr->qp, &swqe_index); 829 memset(swqe, 0, SWQE_HEADER_SIZE); 830 atomic_dec(&pr->swqe_avail); 831 832 swqe->tx_control |= EHEA_SWQE_PURGE; 833 swqe->wr_id = SWQE_RESTART_CHECK; 834 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; 835 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT; 836 swqe->immediate_data_length = 80; 837 838 ehea_post_swqe(pr->qp, swqe); 839 840 ret = wait_event_timeout(port->restart_wq, 841 pr->sq_restart_flag == 0, 842 msecs_to_jiffies(100)); 843 844 if (!ret) { 845 pr_err("HW/SW queues out of sync\n"); 846 ehea_schedule_port_reset(pr->port); 847 return; 848 } 849 } 850} 851 852 853static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) 854{ 855 struct sk_buff *skb; 856 struct ehea_cq *send_cq = pr->send_cq; 857 struct ehea_cqe *cqe; 858 int quota = my_quota; 859 int cqe_counter = 0; 860 int swqe_av = 0; 861 int index; 862 unsigned long flags; 863 864 cqe = ehea_poll_cq(send_cq); 865 while (cqe && (quota > 0)) { 866 ehea_inc_cq(send_cq); 867 868 cqe_counter++; 869 rmb(); 870 871 if (cqe->wr_id == SWQE_RESTART_CHECK) { 872 pr->sq_restart_flag = 1; 873 swqe_av++; 874 break; 875 } 876 877 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 878 pr_err("Bad send completion status=0x%04X\n", 879 cqe->status); 880 881 if (netif_msg_tx_err(pr->port)) 882 ehea_dump(cqe, sizeof(*cqe), "Send CQE"); 883 884 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { 885 pr_err("Resetting port\n"); 886 ehea_schedule_port_reset(pr->port); 887 break; 888 } 889 } 890 891 if (netif_msg_tx_done(pr->port)) 892 ehea_dump(cqe, sizeof(*cqe), "CQE"); 893 894 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) 895 == EHEA_SWQE2_TYPE)) { 896 897 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); 898 skb = pr->sq_skba.arr[index]; 899 dev_kfree_skb(skb); 900 pr->sq_skba.arr[index] = NULL; 901 } 902 903 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); 904 quota--; 905 906 cqe = ehea_poll_cq(send_cq); 907 } 908 909 ehea_update_feca(send_cq, cqe_counter); 910 atomic_add(swqe_av, &pr->swqe_avail); 911 912 spin_lock_irqsave(&pr->netif_queue, flags); 913 914 if (pr->queue_stopped && (atomic_read(&pr->swqe_avail) 915 >= pr->swqe_refill_th)) { 916 netif_wake_queue(pr->port->netdev); 917 pr->queue_stopped = 0; 918 } 919 spin_unlock_irqrestore(&pr->netif_queue, flags); 920 wake_up(&pr->port->swqe_avail_wq); 921 922 return cqe; 923} 924 925#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16 926#define EHEA_POLL_MAX_CQES 65535 927 928static int ehea_poll(struct napi_struct *napi, int budget) 929{ 930 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, 931 napi); 932 struct net_device *dev = pr->port->netdev; 933 struct ehea_cqe *cqe; 934 struct ehea_cqe *cqe_skb = NULL; 935 int force_irq, wqe_index; 936 int rx = 0; 937 938 force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ); 939 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); 940 941 if (!force_irq) 942 rx += ehea_proc_rwqes(dev, pr, budget - rx); 943 944 while ((rx != budget) || force_irq) { 945 pr->poll_counter = 0; 946 force_irq = 0; 947 napi_complete(napi); 948 ehea_reset_cq_ep(pr->recv_cq); 949 ehea_reset_cq_ep(pr->send_cq); 950 ehea_reset_cq_n1(pr->recv_cq); 951 ehea_reset_cq_n1(pr->send_cq); 952 rmb(); 953 cqe = ehea_poll_rq1(pr->qp, &wqe_index); 954 cqe_skb = ehea_poll_cq(pr->send_cq); 955 956 if (!cqe && !cqe_skb) 957 return rx; 958 959 if (!napi_reschedule(napi)) 960 return rx; 961 962 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); 963 rx += ehea_proc_rwqes(dev, pr, budget - rx); 964 } 965 966 pr->poll_counter++; 967 return rx; 968} 969 970#ifdef CONFIG_NET_POLL_CONTROLLER 971static void ehea_netpoll(struct net_device *dev) 972{ 973 struct ehea_port *port = netdev_priv(dev); 974 int i; 975 976 for (i = 0; i < port->num_def_qps; i++) 977 napi_schedule(&port->port_res[i].napi); 978} 979#endif 980 981static irqreturn_t ehea_recv_irq_handler(int irq, void *param) 982{ 983 struct ehea_port_res *pr = param; 984 985 napi_schedule(&pr->napi); 986 987 return IRQ_HANDLED; 988} 989 990static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) 991{ 992 struct ehea_port *port = param; 993 struct ehea_eqe *eqe; 994 struct ehea_qp *qp; 995 u32 qp_token; 996 u64 resource_type, aer, aerr; 997 int reset_port = 0; 998 999 eqe = ehea_poll_eq(port->qp_eq); 1000 1001 while (eqe) { 1002 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); 1003 pr_err("QP aff_err: entry=0x%llx, token=0x%x\n", 1004 eqe->entry, qp_token); 1005 1006 qp = port->port_res[qp_token].qp; 1007 1008 resource_type = ehea_error_data(port->adapter, qp->fw_handle, 1009 &aer, &aerr); 1010 1011 if (resource_type == EHEA_AER_RESTYPE_QP) { 1012 if ((aer & EHEA_AER_RESET_MASK) || 1013 (aerr & EHEA_AERR_RESET_MASK)) 1014 reset_port = 1; 1015 } else 1016 reset_port = 1; /* Reset in case of CQ or EQ error */ 1017 1018 eqe = ehea_poll_eq(port->qp_eq); 1019 } 1020 1021 if (reset_port) { 1022 pr_err("Resetting port\n"); 1023 ehea_schedule_port_reset(port); 1024 } 1025 1026 return IRQ_HANDLED; 1027} 1028 1029static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter, 1030 int logical_port) 1031{ 1032 int i; 1033 1034 for (i = 0; i < EHEA_MAX_PORTS; i++) 1035 if (adapter->port[i]) 1036 if (adapter->port[i]->logical_port_id == logical_port) 1037 return adapter->port[i]; 1038 return NULL; 1039} 1040 1041int ehea_sense_port_attr(struct ehea_port *port) 1042{ 1043 int ret; 1044 u64 hret; 1045 struct hcp_ehea_port_cb0 *cb0; 1046 1047 /* may be called via ehea_neq_tasklet() */ 1048 cb0 = (void *)get_zeroed_page(GFP_ATOMIC); 1049 if (!cb0) { 1050 pr_err("no mem for cb0\n"); 1051 ret = -ENOMEM; 1052 goto out; 1053 } 1054 1055 hret = ehea_h_query_ehea_port(port->adapter->handle, 1056 port->logical_port_id, H_PORT_CB0, 1057 EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF), 1058 cb0); 1059 if (hret != H_SUCCESS) { 1060 ret = -EIO; 1061 goto out_free; 1062 } 1063 1064 /* MAC address */ 1065 port->mac_addr = cb0->port_mac_addr << 16; 1066 1067 if (!is_valid_ether_addr((u8 *)&port->mac_addr)) { 1068 ret = -EADDRNOTAVAIL; 1069 goto out_free; 1070 } 1071 1072 /* Port speed */ 1073 switch (cb0->port_speed) { 1074 case H_SPEED_10M_H: 1075 port->port_speed = EHEA_SPEED_10M; 1076 port->full_duplex = 0; 1077 break; 1078 case H_SPEED_10M_F: 1079 port->port_speed = EHEA_SPEED_10M; 1080 port->full_duplex = 1; 1081 break; 1082 case H_SPEED_100M_H: 1083 port->port_speed = EHEA_SPEED_100M; 1084 port->full_duplex = 0; 1085 break; 1086 case H_SPEED_100M_F: 1087 port->port_speed = EHEA_SPEED_100M; 1088 port->full_duplex = 1; 1089 break; 1090 case H_SPEED_1G_F: 1091 port->port_speed = EHEA_SPEED_1G; 1092 port->full_duplex = 1; 1093 break; 1094 case H_SPEED_10G_F: 1095 port->port_speed = EHEA_SPEED_10G; 1096 port->full_duplex = 1; 1097 break; 1098 default: 1099 port->port_speed = 0; 1100 port->full_duplex = 0; 1101 break; 1102 } 1103 1104 port->autoneg = 1; 1105 port->num_mcs = cb0->num_default_qps; 1106 1107 /* Number of default QPs */ 1108 if (use_mcs) 1109 port->num_def_qps = cb0->num_default_qps; 1110 else 1111 port->num_def_qps = 1; 1112 1113 if (!port->num_def_qps) { 1114 ret = -EINVAL; 1115 goto out_free; 1116 } 1117 1118 port->num_tx_qps = num_tx_qps; 1119 1120 if (port->num_def_qps >= port->num_tx_qps) 1121 port->num_add_tx_qps = 0; 1122 else 1123 port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps; 1124 1125 ret = 0; 1126out_free: 1127 if (ret || netif_msg_probe(port)) 1128 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); 1129 free_page((unsigned long)cb0); 1130out: 1131 return ret; 1132} 1133 1134int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) 1135{ 1136 struct hcp_ehea_port_cb4 *cb4; 1137 u64 hret; 1138 int ret = 0; 1139 1140 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 1141 if (!cb4) { 1142 pr_err("no mem for cb4\n"); 1143 ret = -ENOMEM; 1144 goto out; 1145 } 1146 1147 cb4->port_speed = port_speed; 1148 1149 netif_carrier_off(port->netdev); 1150 1151 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1152 port->logical_port_id, 1153 H_PORT_CB4, H_PORT_CB4_SPEED, cb4); 1154 if (hret == H_SUCCESS) { 1155 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0; 1156 1157 hret = ehea_h_query_ehea_port(port->adapter->handle, 1158 port->logical_port_id, 1159 H_PORT_CB4, H_PORT_CB4_SPEED, 1160 cb4); 1161 if (hret == H_SUCCESS) { 1162 switch (cb4->port_speed) { 1163 case H_SPEED_10M_H: 1164 port->port_speed = EHEA_SPEED_10M; 1165 port->full_duplex = 0; 1166 break; 1167 case H_SPEED_10M_F: 1168 port->port_speed = EHEA_SPEED_10M; 1169 port->full_duplex = 1; 1170 break; 1171 case H_SPEED_100M_H: 1172 port->port_speed = EHEA_SPEED_100M; 1173 port->full_duplex = 0; 1174 break; 1175 case H_SPEED_100M_F: 1176 port->port_speed = EHEA_SPEED_100M; 1177 port->full_duplex = 1; 1178 break; 1179 case H_SPEED_1G_F: 1180 port->port_speed = EHEA_SPEED_1G; 1181 port->full_duplex = 1; 1182 break; 1183 case H_SPEED_10G_F: 1184 port->port_speed = EHEA_SPEED_10G; 1185 port->full_duplex = 1; 1186 break; 1187 default: 1188 port->port_speed = 0; 1189 port->full_duplex = 0; 1190 break; 1191 } 1192 } else { 1193 pr_err("Failed sensing port speed\n"); 1194 ret = -EIO; 1195 } 1196 } else { 1197 if (hret == H_AUTHORITY) { 1198 pr_info("Hypervisor denied setting port speed\n"); 1199 ret = -EPERM; 1200 } else { 1201 ret = -EIO; 1202 pr_err("Failed setting port speed\n"); 1203 } 1204 } 1205 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) 1206 netif_carrier_on(port->netdev); 1207 1208 free_page((unsigned long)cb4); 1209out: 1210 return ret; 1211} 1212 1213static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) 1214{ 1215 int ret; 1216 u8 ec; 1217 u8 portnum; 1218 struct ehea_port *port; 1219 struct net_device *dev; 1220 1221 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); 1222 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); 1223 port = ehea_get_port(adapter, portnum); 1224 dev = port->netdev; 1225 1226 switch (ec) { 1227 case EHEA_EC_PORTSTATE_CHG: /* port state change */ 1228 1229 if (!port) { 1230 netdev_err(dev, "unknown portnum %x\n", portnum); 1231 break; 1232 } 1233 1234 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { 1235 if (!netif_carrier_ok(dev)) { 1236 ret = ehea_sense_port_attr(port); 1237 if (ret) { 1238 netdev_err(dev, "failed resensing port attributes\n"); 1239 break; 1240 } 1241 1242 netif_info(port, link, dev, 1243 "Logical port up: %dMbps %s Duplex\n", 1244 port->port_speed, 1245 port->full_duplex == 1 ? 1246 "Full" : "Half"); 1247 1248 netif_carrier_on(dev); 1249 netif_wake_queue(dev); 1250 } 1251 } else 1252 if (netif_carrier_ok(dev)) { 1253 netif_info(port, link, dev, 1254 "Logical port down\n"); 1255 netif_carrier_off(dev); 1256 netif_stop_queue(dev); 1257 } 1258 1259 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { 1260 port->phy_link = EHEA_PHY_LINK_UP; 1261 netif_info(port, link, dev, 1262 "Physical port up\n"); 1263 if (prop_carrier_state) 1264 netif_carrier_on(dev); 1265 } else { 1266 port->phy_link = EHEA_PHY_LINK_DOWN; 1267 netif_info(port, link, dev, 1268 "Physical port down\n"); 1269 if (prop_carrier_state) 1270 netif_carrier_off(dev); 1271 } 1272 1273 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) 1274 netdev_info(dev, 1275 "External switch port is primary port\n"); 1276 else 1277 netdev_info(dev, 1278 "External switch port is backup port\n"); 1279 1280 break; 1281 case EHEA_EC_ADAPTER_MALFUNC: 1282 netdev_err(dev, "Adapter malfunction\n"); 1283 break; 1284 case EHEA_EC_PORT_MALFUNC: 1285 netdev_info(dev, "Port malfunction\n"); 1286 netif_carrier_off(dev); 1287 netif_stop_queue(dev); 1288 break; 1289 default: 1290 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe); 1291 break; 1292 } 1293} 1294 1295static void ehea_neq_tasklet(unsigned long data) 1296{ 1297 struct ehea_adapter *adapter = (struct ehea_adapter *)data; 1298 struct ehea_eqe *eqe; 1299 u64 event_mask; 1300 1301 eqe = ehea_poll_eq(adapter->neq); 1302 pr_debug("eqe=%p\n", eqe); 1303 1304 while (eqe) { 1305 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry); 1306 ehea_parse_eqe(adapter, eqe->entry); 1307 eqe = ehea_poll_eq(adapter->neq); 1308 pr_debug("next eqe=%p\n", eqe); 1309 } 1310 1311 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) 1312 | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1) 1313 | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1); 1314 1315 ehea_h_reset_events(adapter->handle, 1316 adapter->neq->fw_handle, event_mask); 1317} 1318 1319static irqreturn_t ehea_interrupt_neq(int irq, void *param) 1320{ 1321 struct ehea_adapter *adapter = param; 1322 tasklet_hi_schedule(&adapter->neq_tasklet); 1323 return IRQ_HANDLED; 1324} 1325 1326 1327static int ehea_fill_port_res(struct ehea_port_res *pr) 1328{ 1329 int ret; 1330 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1331 1332 ehea_init_fill_rq1(pr, pr->rq1_skba.len); 1333 1334 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1335 1336 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); 1337 1338 return ret; 1339} 1340 1341static int ehea_reg_interrupts(struct net_device *dev) 1342{ 1343 struct ehea_port *port = netdev_priv(dev); 1344 struct ehea_port_res *pr; 1345 int i, ret; 1346 1347 1348 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", 1349 dev->name); 1350 1351 ret = ibmebus_request_irq(port->qp_eq->attr.ist1, 1352 ehea_qp_aff_irq_handler, 1353 IRQF_DISABLED, port->int_aff_name, port); 1354 if (ret) { 1355 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n", 1356 port->qp_eq->attr.ist1); 1357 goto out_free_qpeq; 1358 } 1359 1360 netif_info(port, ifup, dev, 1361 "irq_handle 0x%X for function qp_aff_irq_handler registered\n", 1362 port->qp_eq->attr.ist1); 1363 1364 1365 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1366 pr = &port->port_res[i]; 1367 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, 1368 "%s-queue%d", dev->name, i); 1369 ret = ibmebus_request_irq(pr->eq->attr.ist1, 1370 ehea_recv_irq_handler, 1371 IRQF_DISABLED, pr->int_send_name, 1372 pr); 1373 if (ret) { 1374 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", 1375 i, pr->eq->attr.ist1); 1376 goto out_free_req; 1377 } 1378 netif_info(port, ifup, dev, 1379 "irq_handle 0x%X for function ehea_queue_int %d registered\n", 1380 pr->eq->attr.ist1, i); 1381 } 1382out: 1383 return ret; 1384 1385 1386out_free_req: 1387 while (--i >= 0) { 1388 u32 ist = port->port_res[i].eq->attr.ist1; 1389 ibmebus_free_irq(ist, &port->port_res[i]); 1390 } 1391 1392out_free_qpeq: 1393 ibmebus_free_irq(port->qp_eq->attr.ist1, port); 1394 i = port->num_def_qps; 1395 1396 goto out; 1397 1398} 1399 1400static void ehea_free_interrupts(struct net_device *dev) 1401{ 1402 struct ehea_port *port = netdev_priv(dev); 1403 struct ehea_port_res *pr; 1404 int i; 1405 1406 /* send */ 1407 1408 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1409 pr = &port->port_res[i]; 1410 ibmebus_free_irq(pr->eq->attr.ist1, pr); 1411 netif_info(port, intr, dev, 1412 "free send irq for res %d with handle 0x%X\n", 1413 i, pr->eq->attr.ist1); 1414 } 1415 1416 /* associated events */ 1417 ibmebus_free_irq(port->qp_eq->attr.ist1, port); 1418 netif_info(port, intr, dev, 1419 "associated event interrupt for handle 0x%X freed\n", 1420 port->qp_eq->attr.ist1); 1421} 1422 1423static int ehea_configure_port(struct ehea_port *port) 1424{ 1425 int ret, i; 1426 u64 hret, mask; 1427 struct hcp_ehea_port_cb0 *cb0; 1428 1429 ret = -ENOMEM; 1430 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 1431 if (!cb0) 1432 goto out; 1433 1434 cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1) 1435 | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1) 1436 | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1) 1437 | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1) 1438 | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER, 1439 PXLY_RC_VLAN_FILTER) 1440 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1); 1441 1442 for (i = 0; i < port->num_mcs; i++) 1443 if (use_mcs) 1444 cb0->default_qpn_arr[i] = 1445 port->port_res[i].qp->init_attr.qp_nr; 1446 else 1447 cb0->default_qpn_arr[i] = 1448 port->port_res[0].qp->init_attr.qp_nr; 1449 1450 if (netif_msg_ifup(port)) 1451 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port"); 1452 1453 mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1) 1454 | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1); 1455 1456 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1457 port->logical_port_id, 1458 H_PORT_CB0, mask, cb0); 1459 ret = -EIO; 1460 if (hret != H_SUCCESS) 1461 goto out_free; 1462 1463 ret = 0; 1464 1465out_free: 1466 free_page((unsigned long)cb0); 1467out: 1468 return ret; 1469} 1470 1471int ehea_gen_smrs(struct ehea_port_res *pr) 1472{ 1473 int ret; 1474 struct ehea_adapter *adapter = pr->port->adapter; 1475 1476 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); 1477 if (ret) 1478 goto out; 1479 1480 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); 1481 if (ret) 1482 goto out_free; 1483 1484 return 0; 1485 1486out_free: 1487 ehea_rem_mr(&pr->send_mr); 1488out: 1489 pr_err("Generating SMRS failed\n"); 1490 return -EIO; 1491} 1492 1493int ehea_rem_smrs(struct ehea_port_res *pr) 1494{ 1495 if ((ehea_rem_mr(&pr->send_mr)) || 1496 (ehea_rem_mr(&pr->recv_mr))) 1497 return -EIO; 1498 else 1499 return 0; 1500} 1501 1502static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) 1503{ 1504 int arr_size = sizeof(void *) * max_q_entries; 1505 1506 q_skba->arr = vzalloc(arr_size); 1507 if (!q_skba->arr) 1508 return -ENOMEM; 1509 1510 q_skba->len = max_q_entries; 1511 q_skba->index = 0; 1512 q_skba->os_skbs = 0; 1513 1514 return 0; 1515} 1516 1517static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, 1518 struct port_res_cfg *pr_cfg, int queue_token) 1519{ 1520 struct ehea_adapter *adapter = port->adapter; 1521 enum ehea_eq_type eq_type = EHEA_EQ; 1522 struct ehea_qp_init_attr *init_attr = NULL; 1523 int ret = -EIO; 1524 u64 tx_bytes, rx_bytes, tx_packets, rx_packets; 1525 1526 tx_bytes = pr->tx_bytes; 1527 tx_packets = pr->tx_packets; 1528 rx_bytes = pr->rx_bytes; 1529 rx_packets = pr->rx_packets; 1530 1531 memset(pr, 0, sizeof(struct ehea_port_res)); 1532 1533 pr->tx_bytes = rx_bytes; 1534 pr->tx_packets = tx_packets; 1535 pr->rx_bytes = rx_bytes; 1536 pr->rx_packets = rx_packets; 1537 1538 pr->port = port; 1539 spin_lock_init(&pr->xmit_lock); 1540 spin_lock_init(&pr->netif_queue); 1541 1542 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); 1543 if (!pr->eq) { 1544 pr_err("create_eq failed (eq)\n"); 1545 goto out_free; 1546 } 1547 1548 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, 1549 pr->eq->fw_handle, 1550 port->logical_port_id); 1551 if (!pr->recv_cq) { 1552 pr_err("create_cq failed (cq_recv)\n"); 1553 goto out_free; 1554 } 1555 1556 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, 1557 pr->eq->fw_handle, 1558 port->logical_port_id); 1559 if (!pr->send_cq) { 1560 pr_err("create_cq failed (cq_send)\n"); 1561 goto out_free; 1562 } 1563 1564 if (netif_msg_ifup(port)) 1565 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n", 1566 pr->send_cq->attr.act_nr_of_cqes, 1567 pr->recv_cq->attr.act_nr_of_cqes); 1568 1569 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 1570 if (!init_attr) { 1571 ret = -ENOMEM; 1572 pr_err("no mem for ehea_qp_init_attr\n"); 1573 goto out_free; 1574 } 1575 1576 init_attr->low_lat_rq1 = 1; 1577 init_attr->signalingtype = 1; /* generate CQE if specified in WQE */ 1578 init_attr->rq_count = 3; 1579 init_attr->qp_token = queue_token; 1580 init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq; 1581 init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1; 1582 init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2; 1583 init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3; 1584 init_attr->wqe_size_enc_sq = EHEA_SG_SQ; 1585 init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1; 1586 init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2; 1587 init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3; 1588 init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD; 1589 init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD; 1590 init_attr->port_nr = port->logical_port_id; 1591 init_attr->send_cq_handle = pr->send_cq->fw_handle; 1592 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; 1593 init_attr->aff_eq_handle = port->qp_eq->fw_handle; 1594 1595 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); 1596 if (!pr->qp) { 1597 pr_err("create_qp failed\n"); 1598 ret = -EIO; 1599 goto out_free; 1600 } 1601 1602 if (netif_msg_ifup(port)) 1603 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n", 1604 init_attr->qp_nr, 1605 init_attr->act_nr_send_wqes, 1606 init_attr->act_nr_rwqes_rq1, 1607 init_attr->act_nr_rwqes_rq2, 1608 init_attr->act_nr_rwqes_rq3); 1609 1610 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; 1611 1612 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); 1613 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); 1614 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); 1615 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); 1616 if (ret) 1617 goto out_free; 1618 1619 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; 1620 if (ehea_gen_smrs(pr) != 0) { 1621 ret = -EIO; 1622 goto out_free; 1623 } 1624 1625 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); 1626 1627 kfree(init_attr); 1628 1629 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); 1630 1631 pr->lro_mgr.max_aggr = pr->port->lro_max_aggr; 1632 pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; 1633 pr->lro_mgr.lro_arr = pr->lro_desc; 1634 pr->lro_mgr.get_skb_header = get_skb_hdr; 1635 pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; 1636 pr->lro_mgr.dev = port->netdev; 1637 pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1638 pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1639 1640 ret = 0; 1641 goto out; 1642 1643out_free: 1644 kfree(init_attr); 1645 vfree(pr->sq_skba.arr); 1646 vfree(pr->rq1_skba.arr); 1647 vfree(pr->rq2_skba.arr); 1648 vfree(pr->rq3_skba.arr); 1649 ehea_destroy_qp(pr->qp); 1650 ehea_destroy_cq(pr->send_cq); 1651 ehea_destroy_cq(pr->recv_cq); 1652 ehea_destroy_eq(pr->eq); 1653out: 1654 return ret; 1655} 1656 1657static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) 1658{ 1659 int ret, i; 1660 1661 if (pr->qp) 1662 netif_napi_del(&pr->napi); 1663 1664 ret = ehea_destroy_qp(pr->qp); 1665 1666 if (!ret) { 1667 ehea_destroy_cq(pr->send_cq); 1668 ehea_destroy_cq(pr->recv_cq); 1669 ehea_destroy_eq(pr->eq); 1670 1671 for (i = 0; i < pr->rq1_skba.len; i++) 1672 if (pr->rq1_skba.arr[i]) 1673 dev_kfree_skb(pr->rq1_skba.arr[i]); 1674 1675 for (i = 0; i < pr->rq2_skba.len; i++) 1676 if (pr->rq2_skba.arr[i]) 1677 dev_kfree_skb(pr->rq2_skba.arr[i]); 1678 1679 for (i = 0; i < pr->rq3_skba.len; i++) 1680 if (pr->rq3_skba.arr[i]) 1681 dev_kfree_skb(pr->rq3_skba.arr[i]); 1682 1683 for (i = 0; i < pr->sq_skba.len; i++) 1684 if (pr->sq_skba.arr[i]) 1685 dev_kfree_skb(pr->sq_skba.arr[i]); 1686 1687 vfree(pr->rq1_skba.arr); 1688 vfree(pr->rq2_skba.arr); 1689 vfree(pr->rq3_skba.arr); 1690 vfree(pr->sq_skba.arr); 1691 ret = ehea_rem_smrs(pr); 1692 } 1693 return ret; 1694} 1695 1696/* 1697 * The write_* functions store information in swqe which is used by 1698 * the hardware to calculate the ip/tcp/udp checksum 1699 */ 1700 1701static inline void write_ip_start_end(struct ehea_swqe *swqe, 1702 const struct sk_buff *skb) 1703{ 1704 swqe->ip_start = skb_network_offset(skb); 1705 swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1); 1706} 1707 1708static inline void write_tcp_offset_end(struct ehea_swqe *swqe, 1709 const struct sk_buff *skb) 1710{ 1711 swqe->tcp_offset = 1712 (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check)); 1713 1714 swqe->tcp_end = (u16)skb->len - 1; 1715} 1716 1717static inline void write_udp_offset_end(struct ehea_swqe *swqe, 1718 const struct sk_buff *skb) 1719{ 1720 swqe->tcp_offset = 1721 (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check)); 1722 1723 swqe->tcp_end = (u16)skb->len - 1; 1724} 1725 1726 1727static void write_swqe2_TSO(struct sk_buff *skb, 1728 struct ehea_swqe *swqe, u32 lkey) 1729{ 1730 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; 1731 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; 1732 int skb_data_size = skb_headlen(skb); 1733 int headersize; 1734 1735 /* Packet is TCP with TSO enabled */ 1736 swqe->tx_control |= EHEA_SWQE_TSO; 1737 swqe->mss = skb_shinfo(skb)->gso_size; 1738 /* copy only eth/ip/tcp headers to immediate data and 1739 * the rest of skb->data to sg1entry 1740 */ 1741 headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); 1742 1743 skb_data_size = skb_headlen(skb); 1744 1745 if (skb_data_size >= headersize) { 1746 /* copy immediate data */ 1747 skb_copy_from_linear_data(skb, imm_data, headersize); 1748 swqe->immediate_data_length = headersize; 1749 1750 if (skb_data_size > headersize) { 1751 /* set sg1entry data */ 1752 sg1entry->l_key = lkey; 1753 sg1entry->len = skb_data_size - headersize; 1754 sg1entry->vaddr = 1755 ehea_map_vaddr(skb->data + headersize); 1756 swqe->descriptors++; 1757 } 1758 } else 1759 pr_err("cannot handle fragmented headers\n"); 1760} 1761 1762static void write_swqe2_nonTSO(struct sk_buff *skb, 1763 struct ehea_swqe *swqe, u32 lkey) 1764{ 1765 int skb_data_size = skb_headlen(skb); 1766 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; 1767 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; 1768 1769 /* Packet is any nonTSO type 1770 * 1771 * Copy as much as possible skb->data to immediate data and 1772 * the rest to sg1entry 1773 */ 1774 if (skb_data_size >= SWQE2_MAX_IMM) { 1775 /* copy immediate data */ 1776 skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM); 1777 1778 swqe->immediate_data_length = SWQE2_MAX_IMM; 1779 1780 if (skb_data_size > SWQE2_MAX_IMM) { 1781 /* copy sg1entry data */ 1782 sg1entry->l_key = lkey; 1783 sg1entry->len = skb_data_size - SWQE2_MAX_IMM; 1784 sg1entry->vaddr = 1785 ehea_map_vaddr(skb->data + SWQE2_MAX_IMM); 1786 swqe->descriptors++; 1787 } 1788 } else { 1789 skb_copy_from_linear_data(skb, imm_data, skb_data_size); 1790 swqe->immediate_data_length = skb_data_size; 1791 } 1792} 1793 1794static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, 1795 struct ehea_swqe *swqe, u32 lkey) 1796{ 1797 struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; 1798 skb_frag_t *frag; 1799 int nfrags, sg1entry_contains_frag_data, i; 1800 1801 nfrags = skb_shinfo(skb)->nr_frags; 1802 sg1entry = &swqe->u.immdata_desc.sg_entry; 1803 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; 1804 swqe->descriptors = 0; 1805 sg1entry_contains_frag_data = 0; 1806 1807 if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size) 1808 write_swqe2_TSO(skb, swqe, lkey); 1809 else 1810 write_swqe2_nonTSO(skb, swqe, lkey); 1811 1812 /* write descriptors */ 1813 if (nfrags > 0) { 1814 if (swqe->descriptors == 0) { 1815 /* sg1entry not yet used */ 1816 frag = &skb_shinfo(skb)->frags[0]; 1817 1818 /* copy sg1entry data */ 1819 sg1entry->l_key = lkey; 1820 sg1entry->len = frag->size; 1821 sg1entry->vaddr = 1822 ehea_map_vaddr(page_address(frag->page) 1823 + frag->page_offset); 1824 swqe->descriptors++; 1825 sg1entry_contains_frag_data = 1; 1826 } 1827 1828 for (i = sg1entry_contains_frag_data; i < nfrags; i++) { 1829 1830 frag = &skb_shinfo(skb)->frags[i]; 1831 sgentry = &sg_list[i - sg1entry_contains_frag_data]; 1832 1833 sgentry->l_key = lkey; 1834 sgentry->len = frag->size; 1835 sgentry->vaddr = 1836 ehea_map_vaddr(page_address(frag->page) 1837 + frag->page_offset); 1838 swqe->descriptors++; 1839 } 1840 } 1841} 1842 1843static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) 1844{ 1845 int ret = 0; 1846 u64 hret; 1847 u8 reg_type; 1848 1849 /* De/Register untagged packets */ 1850 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED; 1851 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1852 port->logical_port_id, 1853 reg_type, port->mac_addr, 0, hcallid); 1854 if (hret != H_SUCCESS) { 1855 pr_err("%sregistering bc address failed (tagged)\n", 1856 hcallid == H_REG_BCMC ? "" : "de"); 1857 ret = -EIO; 1858 goto out_herr; 1859 } 1860 1861 /* De/Register VLAN packets */ 1862 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL; 1863 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1864 port->logical_port_id, 1865 reg_type, port->mac_addr, 0, hcallid); 1866 if (hret != H_SUCCESS) { 1867 pr_err("%sregistering bc address failed (vlan)\n", 1868 hcallid == H_REG_BCMC ? "" : "de"); 1869 ret = -EIO; 1870 } 1871out_herr: 1872 return ret; 1873} 1874 1875static int ehea_set_mac_addr(struct net_device *dev, void *sa) 1876{ 1877 struct ehea_port *port = netdev_priv(dev); 1878 struct sockaddr *mac_addr = sa; 1879 struct hcp_ehea_port_cb0 *cb0; 1880 int ret; 1881 u64 hret; 1882 1883 if (!is_valid_ether_addr(mac_addr->sa_data)) { 1884 ret = -EADDRNOTAVAIL; 1885 goto out; 1886 } 1887 1888 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 1889 if (!cb0) { 1890 pr_err("no mem for cb0\n"); 1891 ret = -ENOMEM; 1892 goto out; 1893 } 1894 1895 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN); 1896 1897 cb0->port_mac_addr = cb0->port_mac_addr >> 16; 1898 1899 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1900 port->logical_port_id, H_PORT_CB0, 1901 EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0); 1902 if (hret != H_SUCCESS) { 1903 ret = -EIO; 1904 goto out_free; 1905 } 1906 1907 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); 1908 1909 /* Deregister old MAC in pHYP */ 1910 if (port->state == EHEA_PORT_UP) { 1911 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 1912 if (ret) 1913 goto out_upregs; 1914 } 1915 1916 port->mac_addr = cb0->port_mac_addr << 16; 1917 1918 /* Register new MAC in pHYP */ 1919 if (port->state == EHEA_PORT_UP) { 1920 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 1921 if (ret) 1922 goto out_upregs; 1923 } 1924 1925 ret = 0; 1926 1927out_upregs: 1928 ehea_update_bcmc_registrations(); 1929out_free: 1930 free_page((unsigned long)cb0); 1931out: 1932 return ret; 1933} 1934 1935static void ehea_promiscuous_error(u64 hret, int enable) 1936{ 1937 if (hret == H_AUTHORITY) 1938 pr_info("Hypervisor denied %sabling promiscuous mode\n", 1939 enable == 1 ? "en" : "dis"); 1940 else 1941 pr_err("failed %sabling promiscuous mode\n", 1942 enable == 1 ? "en" : "dis"); 1943} 1944 1945static void ehea_promiscuous(struct net_device *dev, int enable) 1946{ 1947 struct ehea_port *port = netdev_priv(dev); 1948 struct hcp_ehea_port_cb7 *cb7; 1949 u64 hret; 1950 1951 if (enable == port->promisc) 1952 return; 1953 1954 cb7 = (void *)get_zeroed_page(GFP_ATOMIC); 1955 if (!cb7) { 1956 pr_err("no mem for cb7\n"); 1957 goto out; 1958 } 1959 1960 /* Modify Pxs_DUCQPN in CB7 */ 1961 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; 1962 1963 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1964 port->logical_port_id, 1965 H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7); 1966 if (hret) { 1967 ehea_promiscuous_error(hret, enable); 1968 goto out; 1969 } 1970 1971 port->promisc = enable; 1972out: 1973 free_page((unsigned long)cb7); 1974} 1975 1976static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, 1977 u32 hcallid) 1978{ 1979 u64 hret; 1980 u8 reg_type; 1981 1982 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1983 | EHEA_BCMC_UNTAGGED; 1984 1985 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1986 port->logical_port_id, 1987 reg_type, mc_mac_addr, 0, hcallid); 1988 if (hret) 1989 goto out; 1990 1991 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1992 | EHEA_BCMC_VLANID_ALL; 1993 1994 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1995 port->logical_port_id, 1996 reg_type, mc_mac_addr, 0, hcallid); 1997out: 1998 return hret; 1999} 2000 2001static int ehea_drop_multicast_list(struct net_device *dev) 2002{ 2003 struct ehea_port *port = netdev_priv(dev); 2004 struct ehea_mc_list *mc_entry = port->mc_list; 2005 struct list_head *pos; 2006 struct list_head *temp; 2007 int ret = 0; 2008 u64 hret; 2009 2010 list_for_each_safe(pos, temp, &(port->mc_list->list)) { 2011 mc_entry = list_entry(pos, struct ehea_mc_list, list); 2012 2013 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, 2014 H_DEREG_BCMC); 2015 if (hret) { 2016 pr_err("failed deregistering mcast MAC\n"); 2017 ret = -EIO; 2018 } 2019 2020 list_del(pos); 2021 kfree(mc_entry); 2022 } 2023 return ret; 2024} 2025 2026static void ehea_allmulti(struct net_device *dev, int enable) 2027{ 2028 struct ehea_port *port = netdev_priv(dev); 2029 u64 hret; 2030 2031 if (!port->allmulti) { 2032 if (enable) { 2033 /* Enable ALLMULTI */ 2034 ehea_drop_multicast_list(dev); 2035 hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC); 2036 if (!hret) 2037 port->allmulti = 1; 2038 else 2039 netdev_err(dev, 2040 "failed enabling IFF_ALLMULTI\n"); 2041 } 2042 } else 2043 if (!enable) { 2044 /* Disable ALLMULTI */ 2045 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); 2046 if (!hret) 2047 port->allmulti = 0; 2048 else 2049 netdev_err(dev, 2050 "failed disabling IFF_ALLMULTI\n"); 2051 } 2052} 2053 2054static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) 2055{ 2056 struct ehea_mc_list *ehea_mcl_entry; 2057 u64 hret; 2058 2059 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); 2060 if (!ehea_mcl_entry) { 2061 pr_err("no mem for mcl_entry\n"); 2062 return; 2063 } 2064 2065 INIT_LIST_HEAD(&ehea_mcl_entry->list); 2066 2067 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); 2068 2069 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, 2070 H_REG_BCMC); 2071 if (!hret) 2072 list_add(&ehea_mcl_entry->list, &port->mc_list->list); 2073 else { 2074 pr_err("failed registering mcast MAC\n"); 2075 kfree(ehea_mcl_entry); 2076 } 2077} 2078 2079static void ehea_set_multicast_list(struct net_device *dev) 2080{ 2081 struct ehea_port *port = netdev_priv(dev); 2082 struct netdev_hw_addr *ha; 2083 int ret; 2084 2085 if (dev->flags & IFF_PROMISC) { 2086 ehea_promiscuous(dev, 1); 2087 return; 2088 } 2089 ehea_promiscuous(dev, 0); 2090 2091 if (dev->flags & IFF_ALLMULTI) { 2092 ehea_allmulti(dev, 1); 2093 goto out; 2094 } 2095 ehea_allmulti(dev, 0); 2096 2097 if (!netdev_mc_empty(dev)) { 2098 ret = ehea_drop_multicast_list(dev); 2099 if (ret) { 2100 /* Dropping the current multicast list failed. 2101 * Enabling ALL_MULTI is the best we can do. 2102 */ 2103 ehea_allmulti(dev, 1); 2104 } 2105 2106 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { 2107 pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n", 2108 port->adapter->max_mc_mac); 2109 goto out; 2110 } 2111 2112 netdev_for_each_mc_addr(ha, dev) 2113 ehea_add_multicast_entry(port, ha->addr); 2114 2115 } 2116out: 2117 ehea_update_bcmc_registrations(); 2118} 2119 2120static int ehea_change_mtu(struct net_device *dev, int new_mtu) 2121{ 2122 if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE)) 2123 return -EINVAL; 2124 dev->mtu = new_mtu; 2125 return 0; 2126} 2127 2128static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, 2129 struct ehea_swqe *swqe, u32 lkey) 2130{ 2131 if (skb->protocol == htons(ETH_P_IP)) { 2132 const struct iphdr *iph = ip_hdr(skb); 2133 2134 /* IPv4 */ 2135 swqe->tx_control |= EHEA_SWQE_CRC 2136 | EHEA_SWQE_IP_CHECKSUM 2137 | EHEA_SWQE_TCP_CHECKSUM 2138 | EHEA_SWQE_IMM_DATA_PRESENT 2139 | EHEA_SWQE_DESCRIPTORS_PRESENT; 2140 2141 write_ip_start_end(swqe, skb); 2142 2143 if (iph->protocol == IPPROTO_UDP) { 2144 if ((iph->frag_off & IP_MF) || 2145 (iph->frag_off & IP_OFFSET)) 2146 /* IP fragment, so don't change cs */ 2147 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; 2148 else 2149 write_udp_offset_end(swqe, skb); 2150 } else if (iph->protocol == IPPROTO_TCP) { 2151 write_tcp_offset_end(swqe, skb); 2152 } 2153 2154 /* icmp (big data) and ip segmentation packets (all other ip 2155 packets) do not require any special handling */ 2156 2157 } else { 2158 /* Other Ethernet Protocol */ 2159 swqe->tx_control |= EHEA_SWQE_CRC 2160 | EHEA_SWQE_IMM_DATA_PRESENT 2161 | EHEA_SWQE_DESCRIPTORS_PRESENT; 2162 } 2163 2164 write_swqe2_data(skb, dev, swqe, lkey); 2165} 2166 2167static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, 2168 struct ehea_swqe *swqe) 2169{ 2170 int nfrags = skb_shinfo(skb)->nr_frags; 2171 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; 2172 skb_frag_t *frag; 2173 int i; 2174 2175 if (skb->protocol == htons(ETH_P_IP)) { 2176 const struct iphdr *iph = ip_hdr(skb); 2177 2178 /* IPv4 */ 2179 write_ip_start_end(swqe, skb); 2180 2181 if (iph->protocol == IPPROTO_TCP) { 2182 swqe->tx_control |= EHEA_SWQE_CRC 2183 | EHEA_SWQE_IP_CHECKSUM 2184 | EHEA_SWQE_TCP_CHECKSUM 2185 | EHEA_SWQE_IMM_DATA_PRESENT; 2186 2187 write_tcp_offset_end(swqe, skb); 2188 2189 } else if (iph->protocol == IPPROTO_UDP) { 2190 if ((iph->frag_off & IP_MF) || 2191 (iph->frag_off & IP_OFFSET)) 2192 /* IP fragment, so don't change cs */ 2193 swqe->tx_control |= EHEA_SWQE_CRC 2194 | EHEA_SWQE_IMM_DATA_PRESENT; 2195 else { 2196 swqe->tx_control |= EHEA_SWQE_CRC 2197 | EHEA_SWQE_IP_CHECKSUM 2198 | EHEA_SWQE_TCP_CHECKSUM 2199 | EHEA_SWQE_IMM_DATA_PRESENT; 2200 2201 write_udp_offset_end(swqe, skb); 2202 } 2203 } else { 2204 /* icmp (big data) and 2205 ip segmentation packets (all other ip packets) */ 2206 swqe->tx_control |= EHEA_SWQE_CRC 2207 | EHEA_SWQE_IP_CHECKSUM 2208 | EHEA_SWQE_IMM_DATA_PRESENT; 2209 } 2210 } else { 2211 /* Other Ethernet Protocol */ 2212 swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT; 2213 } 2214 /* copy (immediate) data */ 2215 if (nfrags == 0) { 2216 /* data is in a single piece */ 2217 skb_copy_from_linear_data(skb, imm_data, skb->len); 2218 } else { 2219 /* first copy data from the skb->data buffer ... */ 2220 skb_copy_from_linear_data(skb, imm_data, 2221 skb_headlen(skb)); 2222 imm_data += skb_headlen(skb); 2223 2224 /* ... then copy data from the fragments */ 2225 for (i = 0; i < nfrags; i++) { 2226 frag = &skb_shinfo(skb)->frags[i]; 2227 memcpy(imm_data, 2228 page_address(frag->page) + frag->page_offset, 2229 frag->size); 2230 imm_data += frag->size; 2231 } 2232 } 2233 swqe->immediate_data_length = skb->len; 2234 dev_kfree_skb(skb); 2235} 2236 2237static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps) 2238{ 2239 struct tcphdr *tcp; 2240 u32 tmp; 2241 2242 if ((skb->protocol == htons(ETH_P_IP)) && 2243 (ip_hdr(skb)->protocol == IPPROTO_TCP)) { 2244 tcp = (struct tcphdr *)(skb_network_header(skb) + 2245 (ip_hdr(skb)->ihl * 4)); 2246 tmp = (tcp->source + (tcp->dest << 16)) % 31; 2247 tmp += ip_hdr(skb)->daddr % 31; 2248 return tmp % num_qps; 2249 } else 2250 return 0; 2251} 2252 2253static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) 2254{ 2255 struct ehea_port *port = netdev_priv(dev); 2256 struct ehea_swqe *swqe; 2257 unsigned long flags; 2258 u32 lkey; 2259 int swqe_index; 2260 struct ehea_port_res *pr; 2261 2262 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)]; 2263 2264 if (!spin_trylock(&pr->xmit_lock)) 2265 return NETDEV_TX_BUSY; 2266 2267 if (pr->queue_stopped) { 2268 spin_unlock(&pr->xmit_lock); 2269 return NETDEV_TX_BUSY; 2270 } 2271 2272 swqe = ehea_get_swqe(pr->qp, &swqe_index); 2273 memset(swqe, 0, SWQE_HEADER_SIZE); 2274 atomic_dec(&pr->swqe_avail); 2275 2276 if (vlan_tx_tag_present(skb)) { 2277 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; 2278 swqe->vlan_tag = vlan_tx_tag_get(skb); 2279 } 2280 2281 pr->tx_packets++; 2282 pr->tx_bytes += skb->len; 2283 2284 if (skb->len <= SWQE3_MAX_IMM) { 2285 u32 sig_iv = port->sig_comp_iv; 2286 u32 swqe_num = pr->swqe_id_counter; 2287 ehea_xmit3(skb, dev, swqe); 2288 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) 2289 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num); 2290 if (pr->swqe_ll_count >= (sig_iv - 1)) { 2291 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 2292 sig_iv); 2293 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; 2294 pr->swqe_ll_count = 0; 2295 } else 2296 pr->swqe_ll_count += 1; 2297 } else { 2298 swqe->wr_id = 2299 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) 2300 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) 2301 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1) 2302 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); 2303 pr->sq_skba.arr[pr->sq_skba.index] = skb; 2304 2305 pr->sq_skba.index++; 2306 pr->sq_skba.index &= (pr->sq_skba.len - 1); 2307 2308 lkey = pr->send_mr.lkey; 2309 ehea_xmit2(skb, dev, swqe, lkey); 2310 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; 2311 } 2312 pr->swqe_id_counter += 1; 2313 2314 netif_info(port, tx_queued, dev, 2315 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); 2316 if (netif_msg_tx_queued(port)) 2317 ehea_dump(swqe, 512, "swqe"); 2318 2319 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 2320 netif_stop_queue(dev); 2321 swqe->tx_control |= EHEA_SWQE_PURGE; 2322 } 2323 2324 ehea_post_swqe(pr->qp, swqe); 2325 2326 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 2327 spin_lock_irqsave(&pr->netif_queue, flags); 2328 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 2329 pr->p_stats.queue_stopped++; 2330 netif_stop_queue(dev); 2331 pr->queue_stopped = 1; 2332 } 2333 spin_unlock_irqrestore(&pr->netif_queue, flags); 2334 } 2335 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ 2336 spin_unlock(&pr->xmit_lock); 2337 2338 return NETDEV_TX_OK; 2339} 2340 2341static void ehea_vlan_rx_register(struct net_device *dev, 2342 struct vlan_group *grp) 2343{ 2344 struct ehea_port *port = netdev_priv(dev); 2345 struct ehea_adapter *adapter = port->adapter; 2346 struct hcp_ehea_port_cb1 *cb1; 2347 u64 hret; 2348 2349 port->vgrp = grp; 2350 2351 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2352 if (!cb1) { 2353 pr_err("no mem for cb1\n"); 2354 goto out; 2355 } 2356 2357 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2358 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2359 if (hret != H_SUCCESS) 2360 pr_err("modify_ehea_port failed\n"); 2361 2362 free_page((unsigned long)cb1); 2363out: 2364 return; 2365} 2366 2367static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 2368{ 2369 struct ehea_port *port = netdev_priv(dev); 2370 struct ehea_adapter *adapter = port->adapter; 2371 struct hcp_ehea_port_cb1 *cb1; 2372 int index; 2373 u64 hret; 2374 2375 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2376 if (!cb1) { 2377 pr_err("no mem for cb1\n"); 2378 goto out; 2379 } 2380 2381 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2382 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2383 if (hret != H_SUCCESS) { 2384 pr_err("query_ehea_port failed\n"); 2385 goto out; 2386 } 2387 2388 index = (vid / 64); 2389 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F))); 2390 2391 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2392 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2393 if (hret != H_SUCCESS) 2394 pr_err("modify_ehea_port failed\n"); 2395out: 2396 free_page((unsigned long)cb1); 2397 return; 2398} 2399 2400static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 2401{ 2402 struct ehea_port *port = netdev_priv(dev); 2403 struct ehea_adapter *adapter = port->adapter; 2404 struct hcp_ehea_port_cb1 *cb1; 2405 int index; 2406 u64 hret; 2407 2408 vlan_group_set_device(port->vgrp, vid, NULL); 2409 2410 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2411 if (!cb1) { 2412 pr_err("no mem for cb1\n"); 2413 goto out; 2414 } 2415 2416 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2417 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2418 if (hret != H_SUCCESS) { 2419 pr_err("query_ehea_port failed\n"); 2420 goto out; 2421 } 2422 2423 index = (vid / 64); 2424 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F))); 2425 2426 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2427 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2428 if (hret != H_SUCCESS) 2429 pr_err("modify_ehea_port failed\n"); 2430out: 2431 free_page((unsigned long)cb1); 2432} 2433 2434int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) 2435{ 2436 int ret = -EIO; 2437 u64 hret; 2438 u16 dummy16 = 0; 2439 u64 dummy64 = 0; 2440 struct hcp_modify_qp_cb0 *cb0; 2441 2442 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 2443 if (!cb0) { 2444 ret = -ENOMEM; 2445 goto out; 2446 } 2447 2448 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2449 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2450 if (hret != H_SUCCESS) { 2451 pr_err("query_ehea_qp failed (1)\n"); 2452 goto out; 2453 } 2454 2455 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; 2456 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2457 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2458 &dummy64, &dummy64, &dummy16, &dummy16); 2459 if (hret != H_SUCCESS) { 2460 pr_err("modify_ehea_qp failed (1)\n"); 2461 goto out; 2462 } 2463 2464 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2465 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2466 if (hret != H_SUCCESS) { 2467 pr_err("query_ehea_qp failed (2)\n"); 2468 goto out; 2469 } 2470 2471 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; 2472 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2473 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2474 &dummy64, &dummy64, &dummy16, &dummy16); 2475 if (hret != H_SUCCESS) { 2476 pr_err("modify_ehea_qp failed (2)\n"); 2477 goto out; 2478 } 2479 2480 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2481 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2482 if (hret != H_SUCCESS) { 2483 pr_err("query_ehea_qp failed (3)\n"); 2484 goto out; 2485 } 2486 2487 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; 2488 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2489 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2490 &dummy64, &dummy64, &dummy16, &dummy16); 2491 if (hret != H_SUCCESS) { 2492 pr_err("modify_ehea_qp failed (3)\n"); 2493 goto out; 2494 } 2495 2496 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2497 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2498 if (hret != H_SUCCESS) { 2499 pr_err("query_ehea_qp failed (4)\n"); 2500 goto out; 2501 } 2502 2503 ret = 0; 2504out: 2505 free_page((unsigned long)cb0); 2506 return ret; 2507} 2508 2509static int ehea_port_res_setup(struct ehea_port *port, int def_qps, 2510 int add_tx_qps) 2511{ 2512 int ret, i; 2513 struct port_res_cfg pr_cfg, pr_cfg_small_rx; 2514 enum ehea_eq_type eq_type = EHEA_EQ; 2515 2516 port->qp_eq = ehea_create_eq(port->adapter, eq_type, 2517 EHEA_MAX_ENTRIES_EQ, 1); 2518 if (!port->qp_eq) { 2519 ret = -EINVAL; 2520 pr_err("ehea_create_eq failed (qp_eq)\n"); 2521 goto out_kill_eq; 2522 } 2523 2524 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; 2525 pr_cfg.max_entries_scq = sq_entries * 2; 2526 pr_cfg.max_entries_sq = sq_entries; 2527 pr_cfg.max_entries_rq1 = rq1_entries; 2528 pr_cfg.max_entries_rq2 = rq2_entries; 2529 pr_cfg.max_entries_rq3 = rq3_entries; 2530 2531 pr_cfg_small_rx.max_entries_rcq = 1; 2532 pr_cfg_small_rx.max_entries_scq = sq_entries; 2533 pr_cfg_small_rx.max_entries_sq = sq_entries; 2534 pr_cfg_small_rx.max_entries_rq1 = 1; 2535 pr_cfg_small_rx.max_entries_rq2 = 1; 2536 pr_cfg_small_rx.max_entries_rq3 = 1; 2537 2538 for (i = 0; i < def_qps; i++) { 2539 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); 2540 if (ret) 2541 goto out_clean_pr; 2542 } 2543 for (i = def_qps; i < def_qps + add_tx_qps; i++) { 2544 ret = ehea_init_port_res(port, &port->port_res[i], 2545 &pr_cfg_small_rx, i); 2546 if (ret) 2547 goto out_clean_pr; 2548 } 2549 2550 return 0; 2551 2552out_clean_pr: 2553 while (--i >= 0) 2554 ehea_clean_portres(port, &port->port_res[i]); 2555 2556out_kill_eq: 2557 ehea_destroy_eq(port->qp_eq); 2558 return ret; 2559} 2560 2561static int ehea_clean_all_portres(struct ehea_port *port) 2562{ 2563 int ret = 0; 2564 int i; 2565 2566 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2567 ret |= ehea_clean_portres(port, &port->port_res[i]); 2568 2569 ret |= ehea_destroy_eq(port->qp_eq); 2570 2571 return ret; 2572} 2573 2574static void ehea_remove_adapter_mr(struct ehea_adapter *adapter) 2575{ 2576 if (adapter->active_ports) 2577 return; 2578 2579 ehea_rem_mr(&adapter->mr); 2580} 2581 2582static int ehea_add_adapter_mr(struct ehea_adapter *adapter) 2583{ 2584 if (adapter->active_ports) 2585 return 0; 2586 2587 return ehea_reg_kernel_mr(adapter, &adapter->mr); 2588} 2589 2590static int ehea_up(struct net_device *dev) 2591{ 2592 int ret, i; 2593 struct ehea_port *port = netdev_priv(dev); 2594 2595 if (port->state == EHEA_PORT_UP) 2596 return 0; 2597 2598 ret = ehea_port_res_setup(port, port->num_def_qps, 2599 port->num_add_tx_qps); 2600 if (ret) { 2601 netdev_err(dev, "port_res_failed\n"); 2602 goto out; 2603 } 2604 2605 /* Set default QP for this port */ 2606 ret = ehea_configure_port(port); 2607 if (ret) { 2608 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret); 2609 goto out_clean_pr; 2610 } 2611 2612 ret = ehea_reg_interrupts(dev); 2613 if (ret) { 2614 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret); 2615 goto out_clean_pr; 2616 } 2617 2618 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2619 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); 2620 if (ret) { 2621 netdev_err(dev, "activate_qp failed\n"); 2622 goto out_free_irqs; 2623 } 2624 } 2625 2626 for (i = 0; i < port->num_def_qps; i++) { 2627 ret = ehea_fill_port_res(&port->port_res[i]); 2628 if (ret) { 2629 netdev_err(dev, "out_free_irqs\n"); 2630 goto out_free_irqs; 2631 } 2632 } 2633 2634 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 2635 if (ret) { 2636 ret = -EIO; 2637 goto out_free_irqs; 2638 } 2639 2640 port->state = EHEA_PORT_UP; 2641 2642 ret = 0; 2643 goto out; 2644 2645out_free_irqs: 2646 ehea_free_interrupts(dev); 2647 2648out_clean_pr: 2649 ehea_clean_all_portres(port); 2650out: 2651 if (ret) 2652 netdev_info(dev, "Failed starting. ret=%i\n", ret); 2653 2654 ehea_update_bcmc_registrations(); 2655 ehea_update_firmware_handles(); 2656 2657 return ret; 2658} 2659 2660static void port_napi_disable(struct ehea_port *port) 2661{ 2662 int i; 2663 2664 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2665 napi_disable(&port->port_res[i].napi); 2666} 2667 2668static void port_napi_enable(struct ehea_port *port) 2669{ 2670 int i; 2671 2672 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2673 napi_enable(&port->port_res[i].napi); 2674} 2675 2676static int ehea_open(struct net_device *dev) 2677{ 2678 int ret; 2679 struct ehea_port *port = netdev_priv(dev); 2680 2681 mutex_lock(&port->port_lock); 2682 2683 netif_info(port, ifup, dev, "enabling port\n"); 2684 2685 ret = ehea_up(dev); 2686 if (!ret) { 2687 port_napi_enable(port); 2688 netif_start_queue(dev); 2689 } 2690 2691 init_waitqueue_head(&port->swqe_avail_wq); 2692 init_waitqueue_head(&port->restart_wq); 2693 2694 mutex_unlock(&port->port_lock); 2695 2696 return ret; 2697} 2698 2699static int ehea_down(struct net_device *dev) 2700{ 2701 int ret; 2702 struct ehea_port *port = netdev_priv(dev); 2703 2704 if (port->state == EHEA_PORT_DOWN) 2705 return 0; 2706 2707 ehea_drop_multicast_list(dev); 2708 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2709 2710 ehea_free_interrupts(dev); 2711 2712 port->state = EHEA_PORT_DOWN; 2713 2714 ehea_update_bcmc_registrations(); 2715 2716 ret = ehea_clean_all_portres(port); 2717 if (ret) 2718 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret); 2719 2720 ehea_update_firmware_handles(); 2721 2722 return ret; 2723} 2724 2725static int ehea_stop(struct net_device *dev) 2726{ 2727 int ret; 2728 struct ehea_port *port = netdev_priv(dev); 2729 2730 netif_info(port, ifdown, dev, "disabling port\n"); 2731 2732 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); 2733 cancel_work_sync(&port->reset_task); 2734 mutex_lock(&port->port_lock); 2735 netif_stop_queue(dev); 2736 port_napi_disable(port); 2737 ret = ehea_down(dev); 2738 mutex_unlock(&port->port_lock); 2739 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); 2740 return ret; 2741} 2742 2743static void ehea_purge_sq(struct ehea_qp *orig_qp) 2744{ 2745 struct ehea_qp qp = *orig_qp; 2746 struct ehea_qp_init_attr *init_attr = &qp.init_attr; 2747 struct ehea_swqe *swqe; 2748 int wqe_index; 2749 int i; 2750 2751 for (i = 0; i < init_attr->act_nr_send_wqes; i++) { 2752 swqe = ehea_get_swqe(&qp, &wqe_index); 2753 swqe->tx_control |= EHEA_SWQE_PURGE; 2754 } 2755} 2756 2757static void ehea_flush_sq(struct ehea_port *port) 2758{ 2759 int i; 2760 2761 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2762 struct ehea_port_res *pr = &port->port_res[i]; 2763 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; 2764 int ret; 2765 2766 ret = wait_event_timeout(port->swqe_avail_wq, 2767 atomic_read(&pr->swqe_avail) >= swqe_max, 2768 msecs_to_jiffies(100)); 2769 2770 if (!ret) { 2771 pr_err("WARNING: sq not flushed completely\n"); 2772 break; 2773 } 2774 } 2775} 2776 2777int ehea_stop_qps(struct net_device *dev) 2778{ 2779 struct ehea_port *port = netdev_priv(dev); 2780 struct ehea_adapter *adapter = port->adapter; 2781 struct hcp_modify_qp_cb0 *cb0; 2782 int ret = -EIO; 2783 int dret; 2784 int i; 2785 u64 hret; 2786 u64 dummy64 = 0; 2787 u16 dummy16 = 0; 2788 2789 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 2790 if (!cb0) { 2791 ret = -ENOMEM; 2792 goto out; 2793 } 2794 2795 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { 2796 struct ehea_port_res *pr = &port->port_res[i]; 2797 struct ehea_qp *qp = pr->qp; 2798 2799 /* Purge send queue */ 2800 ehea_purge_sq(qp); 2801 2802 /* Disable queue pair */ 2803 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2804 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2805 cb0); 2806 if (hret != H_SUCCESS) { 2807 pr_err("query_ehea_qp failed (1)\n"); 2808 goto out; 2809 } 2810 2811 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; 2812 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED; 2813 2814 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2815 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 2816 1), cb0, &dummy64, 2817 &dummy64, &dummy16, &dummy16); 2818 if (hret != H_SUCCESS) { 2819 pr_err("modify_ehea_qp failed (1)\n"); 2820 goto out; 2821 } 2822 2823 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2824 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2825 cb0); 2826 if (hret != H_SUCCESS) { 2827 pr_err("query_ehea_qp failed (2)\n"); 2828 goto out; 2829 } 2830 2831 /* deregister shared memory regions */ 2832 dret = ehea_rem_smrs(pr); 2833 if (dret) { 2834 pr_err("unreg shared memory region failed\n"); 2835 goto out; 2836 } 2837 } 2838 2839 ret = 0; 2840out: 2841 free_page((unsigned long)cb0); 2842 2843 return ret; 2844} 2845 2846void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) 2847{ 2848 struct ehea_qp qp = *orig_qp; 2849 struct ehea_qp_init_attr *init_attr = &qp.init_attr; 2850 struct ehea_rwqe *rwqe; 2851 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; 2852 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; 2853 struct sk_buff *skb; 2854 u32 lkey = pr->recv_mr.lkey; 2855 2856 2857 int i; 2858 int index; 2859 2860 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) { 2861 rwqe = ehea_get_next_rwqe(&qp, 2); 2862 rwqe->sg_list[0].l_key = lkey; 2863 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); 2864 skb = skba_rq2[index]; 2865 if (skb) 2866 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); 2867 } 2868 2869 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) { 2870 rwqe = ehea_get_next_rwqe(&qp, 3); 2871 rwqe->sg_list[0].l_key = lkey; 2872 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); 2873 skb = skba_rq3[index]; 2874 if (skb) 2875 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); 2876 } 2877} 2878 2879int ehea_restart_qps(struct net_device *dev) 2880{ 2881 struct ehea_port *port = netdev_priv(dev); 2882 struct ehea_adapter *adapter = port->adapter; 2883 int ret = 0; 2884 int i; 2885 2886 struct hcp_modify_qp_cb0 *cb0; 2887 u64 hret; 2888 u64 dummy64 = 0; 2889 u16 dummy16 = 0; 2890 2891 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 2892 if (!cb0) { 2893 ret = -ENOMEM; 2894 goto out; 2895 } 2896 2897 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { 2898 struct ehea_port_res *pr = &port->port_res[i]; 2899 struct ehea_qp *qp = pr->qp; 2900 2901 ret = ehea_gen_smrs(pr); 2902 if (ret) { 2903 netdev_err(dev, "creation of shared memory regions failed\n"); 2904 goto out; 2905 } 2906 2907 ehea_update_rqs(qp, pr); 2908 2909 /* Enable queue pair */ 2910 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2911 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2912 cb0); 2913 if (hret != H_SUCCESS) { 2914 netdev_err(dev, "query_ehea_qp failed (1)\n"); 2915 goto out; 2916 } 2917 2918 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; 2919 cb0->qp_ctl_reg |= H_QP_CR_ENABLED; 2920 2921 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2922 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 2923 1), cb0, &dummy64, 2924 &dummy64, &dummy16, &dummy16); 2925 if (hret != H_SUCCESS) { 2926 netdev_err(dev, "modify_ehea_qp failed (1)\n"); 2927 goto out; 2928 } 2929 2930 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2931 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2932 cb0); 2933 if (hret != H_SUCCESS) { 2934 netdev_err(dev, "query_ehea_qp failed (2)\n"); 2935 goto out; 2936 } 2937 2938 /* refill entire queue */ 2939 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); 2940 ehea_refill_rq2(pr, 0); 2941 ehea_refill_rq3(pr, 0); 2942 } 2943out: 2944 free_page((unsigned long)cb0); 2945 2946 return ret; 2947} 2948 2949static void ehea_reset_port(struct work_struct *work) 2950{ 2951 int ret; 2952 struct ehea_port *port = 2953 container_of(work, struct ehea_port, reset_task); 2954 struct net_device *dev = port->netdev; 2955 2956 mutex_lock(&dlpar_mem_lock); 2957 port->resets++; 2958 mutex_lock(&port->port_lock); 2959 netif_stop_queue(dev); 2960 2961 port_napi_disable(port); 2962 2963 ehea_down(dev); 2964 2965 ret = ehea_up(dev); 2966 if (ret) 2967 goto out; 2968 2969 ehea_set_multicast_list(dev); 2970 2971 netif_info(port, timer, dev, "reset successful\n"); 2972 2973 port_napi_enable(port); 2974 2975 netif_wake_queue(dev); 2976out: 2977 mutex_unlock(&port->port_lock); 2978 mutex_unlock(&dlpar_mem_lock); 2979} 2980 2981static void ehea_rereg_mrs(void) 2982{ 2983 int ret, i; 2984 struct ehea_adapter *adapter; 2985 2986 pr_info("LPAR memory changed - re-initializing driver\n"); 2987 2988 list_for_each_entry(adapter, &adapter_list, list) 2989 if (adapter->active_ports) { 2990 /* Shutdown all ports */ 2991 for (i = 0; i < EHEA_MAX_PORTS; i++) { 2992 struct ehea_port *port = adapter->port[i]; 2993 struct net_device *dev; 2994 2995 if (!port) 2996 continue; 2997 2998 dev = port->netdev; 2999 3000 if (dev->flags & IFF_UP) { 3001 mutex_lock(&port->port_lock); 3002 netif_stop_queue(dev); 3003 ehea_flush_sq(port); 3004 ret = ehea_stop_qps(dev); 3005 if (ret) { 3006 mutex_unlock(&port->port_lock); 3007 goto out; 3008 } 3009 port_napi_disable(port); 3010 mutex_unlock(&port->port_lock); 3011 } 3012 reset_sq_restart_flag(port); 3013 } 3014 3015 /* Unregister old memory region */ 3016 ret = ehea_rem_mr(&adapter->mr); 3017 if (ret) { 3018 pr_err("unregister MR failed - driver inoperable!\n"); 3019 goto out; 3020 } 3021 } 3022 3023 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3024 3025 list_for_each_entry(adapter, &adapter_list, list) 3026 if (adapter->active_ports) { 3027 /* Register new memory region */ 3028 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); 3029 if (ret) { 3030 pr_err("register MR failed - driver inoperable!\n"); 3031 goto out; 3032 } 3033 3034 /* Restart all ports */ 3035 for (i = 0; i < EHEA_MAX_PORTS; i++) { 3036 struct ehea_port *port = adapter->port[i]; 3037 3038 if (port) { 3039 struct net_device *dev = port->netdev; 3040 3041 if (dev->flags & IFF_UP) { 3042 mutex_lock(&port->port_lock); 3043 port_napi_enable(port); 3044 ret = ehea_restart_qps(dev); 3045 check_sqs(port); 3046 if (!ret) 3047 netif_wake_queue(dev); 3048 mutex_unlock(&port->port_lock); 3049 } 3050 } 3051 } 3052 } 3053 pr_info("re-initializing driver complete\n"); 3054out: 3055 return; 3056} 3057 3058static void ehea_tx_watchdog(struct net_device *dev) 3059{ 3060 struct ehea_port *port = netdev_priv(dev); 3061 3062 if (netif_carrier_ok(dev) && 3063 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) 3064 ehea_schedule_port_reset(port); 3065} 3066 3067int ehea_sense_adapter_attr(struct ehea_adapter *adapter) 3068{ 3069 struct hcp_query_ehea *cb; 3070 u64 hret; 3071 int ret; 3072 3073 cb = (void *)get_zeroed_page(GFP_KERNEL); 3074 if (!cb) { 3075 ret = -ENOMEM; 3076 goto out; 3077 } 3078 3079 hret = ehea_h_query_ehea(adapter->handle, cb); 3080 3081 if (hret != H_SUCCESS) { 3082 ret = -EIO; 3083 goto out_herr; 3084 } 3085 3086 adapter->max_mc_mac = cb->max_mc_mac - 1; 3087 ret = 0; 3088 3089out_herr: 3090 free_page((unsigned long)cb); 3091out: 3092 return ret; 3093} 3094 3095int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) 3096{ 3097 struct hcp_ehea_port_cb4 *cb4; 3098 u64 hret; 3099 int ret = 0; 3100 3101 *jumbo = 0; 3102 3103 /* (Try to) enable *jumbo frames */ 3104 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 3105 if (!cb4) { 3106 pr_err("no mem for cb4\n"); 3107 ret = -ENOMEM; 3108 goto out; 3109 } else { 3110 hret = ehea_h_query_ehea_port(port->adapter->handle, 3111 port->logical_port_id, 3112 H_PORT_CB4, 3113 H_PORT_CB4_JUMBO, cb4); 3114 if (hret == H_SUCCESS) { 3115 if (cb4->jumbo_frame) 3116 *jumbo = 1; 3117 else { 3118 cb4->jumbo_frame = 1; 3119 hret = ehea_h_modify_ehea_port(port->adapter-> 3120 handle, 3121 port-> 3122 logical_port_id, 3123 H_PORT_CB4, 3124 H_PORT_CB4_JUMBO, 3125 cb4); 3126 if (hret == H_SUCCESS) 3127 *jumbo = 1; 3128 } 3129 } else 3130 ret = -EINVAL; 3131 3132 free_page((unsigned long)cb4); 3133 } 3134out: 3135 return ret; 3136} 3137 3138static ssize_t ehea_show_port_id(struct device *dev, 3139 struct device_attribute *attr, char *buf) 3140{ 3141 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); 3142 return sprintf(buf, "%d", port->logical_port_id); 3143} 3144 3145static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, 3146 NULL); 3147 3148static void __devinit logical_port_release(struct device *dev) 3149{ 3150 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); 3151 of_node_put(port->ofdev.dev.of_node); 3152} 3153 3154static struct device *ehea_register_port(struct ehea_port *port, 3155 struct device_node *dn) 3156{ 3157 int ret; 3158 3159 port->ofdev.dev.of_node = of_node_get(dn); 3160 port->ofdev.dev.parent = &port->adapter->ofdev->dev; 3161 port->ofdev.dev.bus = &ibmebus_bus_type; 3162 3163 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++); 3164 port->ofdev.dev.release = logical_port_release; 3165 3166 ret = of_device_register(&port->ofdev); 3167 if (ret) { 3168 pr_err("failed to register device. ret=%d\n", ret); 3169 goto out; 3170 } 3171 3172 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); 3173 if (ret) { 3174 pr_err("failed to register attributes, ret=%d\n", ret); 3175 goto out_unreg_of_dev; 3176 } 3177 3178 return &port->ofdev.dev; 3179 3180out_unreg_of_dev: 3181 of_device_unregister(&port->ofdev); 3182out: 3183 return NULL; 3184} 3185 3186static void ehea_unregister_port(struct ehea_port *port) 3187{ 3188 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); 3189 of_device_unregister(&port->ofdev); 3190} 3191 3192static const struct net_device_ops ehea_netdev_ops = { 3193 .ndo_open = ehea_open, 3194 .ndo_stop = ehea_stop, 3195 .ndo_start_xmit = ehea_start_xmit, 3196#ifdef CONFIG_NET_POLL_CONTROLLER 3197 .ndo_poll_controller = ehea_netpoll, 3198#endif 3199 .ndo_get_stats = ehea_get_stats, 3200 .ndo_set_mac_address = ehea_set_mac_addr, 3201 .ndo_validate_addr = eth_validate_addr, 3202 .ndo_set_multicast_list = ehea_set_multicast_list, 3203 .ndo_change_mtu = ehea_change_mtu, 3204 .ndo_vlan_rx_register = ehea_vlan_rx_register, 3205 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, 3206 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid, 3207 .ndo_tx_timeout = ehea_tx_watchdog, 3208}; 3209 3210struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, 3211 u32 logical_port_id, 3212 struct device_node *dn) 3213{ 3214 int ret; 3215 struct net_device *dev; 3216 struct ehea_port *port; 3217 struct device *port_dev; 3218 int jumbo; 3219 3220 /* allocate memory for the port structures */ 3221 dev = alloc_etherdev(sizeof(struct ehea_port)); 3222 3223 if (!dev) { 3224 pr_err("no mem for net_device\n"); 3225 ret = -ENOMEM; 3226 goto out_err; 3227 } 3228 3229 port = netdev_priv(dev); 3230 3231 mutex_init(&port->port_lock); 3232 port->state = EHEA_PORT_DOWN; 3233 port->sig_comp_iv = sq_entries / 10; 3234 3235 port->adapter = adapter; 3236 port->netdev = dev; 3237 port->logical_port_id = logical_port_id; 3238 3239 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); 3240 3241 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); 3242 if (!port->mc_list) { 3243 ret = -ENOMEM; 3244 goto out_free_ethdev; 3245 } 3246 3247 INIT_LIST_HEAD(&port->mc_list->list); 3248 3249 ret = ehea_sense_port_attr(port); 3250 if (ret) 3251 goto out_free_mc_list; 3252 3253 port_dev = ehea_register_port(port, dn); 3254 if (!port_dev) 3255 goto out_free_mc_list; 3256 3257 SET_NETDEV_DEV(dev, port_dev); 3258 3259 /* initialize net_device structure */ 3260 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); 3261 3262 dev->netdev_ops = &ehea_netdev_ops; 3263 ehea_set_ethtool_ops(dev); 3264 3265 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO 3266 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3267 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3268 | NETIF_F_LLTX; 3269 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3270 3271 if (use_lro) 3272 dev->features |= NETIF_F_LRO; 3273 3274 INIT_WORK(&port->reset_task, ehea_reset_port); 3275 3276 ret = register_netdev(dev); 3277 if (ret) { 3278 pr_err("register_netdev failed. ret=%d\n", ret); 3279 goto out_unreg_port; 3280 } 3281 3282 port->lro_max_aggr = lro_max_aggr; 3283 3284 ret = ehea_get_jumboframe_status(port, &jumbo); 3285 if (ret) 3286 netdev_err(dev, "failed determining jumbo frame status\n"); 3287 3288 netdev_info(dev, "Jumbo frames are %sabled\n", 3289 jumbo == 1 ? "en" : "dis"); 3290 3291 adapter->active_ports++; 3292 3293 return port; 3294 3295out_unreg_port: 3296 ehea_unregister_port(port); 3297 3298out_free_mc_list: 3299 kfree(port->mc_list); 3300 3301out_free_ethdev: 3302 free_netdev(dev); 3303 3304out_err: 3305 pr_err("setting up logical port with id=%d failed, ret=%d\n", 3306 logical_port_id, ret); 3307 return NULL; 3308} 3309 3310static void ehea_shutdown_single_port(struct ehea_port *port) 3311{ 3312 struct ehea_adapter *adapter = port->adapter; 3313 3314 cancel_work_sync(&port->reset_task); 3315 unregister_netdev(port->netdev); 3316 ehea_unregister_port(port); 3317 kfree(port->mc_list); 3318 free_netdev(port->netdev); 3319 adapter->active_ports--; 3320} 3321 3322static int ehea_setup_ports(struct ehea_adapter *adapter) 3323{ 3324 struct device_node *lhea_dn; 3325 struct device_node *eth_dn = NULL; 3326 3327 const u32 *dn_log_port_id; 3328 int i = 0; 3329 3330 lhea_dn = adapter->ofdev->dev.of_node; 3331 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 3332 3333 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3334 NULL); 3335 if (!dn_log_port_id) { 3336 pr_err("bad device node: eth_dn name=%s\n", 3337 eth_dn->full_name); 3338 continue; 3339 } 3340 3341 if (ehea_add_adapter_mr(adapter)) { 3342 pr_err("creating MR failed\n"); 3343 of_node_put(eth_dn); 3344 return -EIO; 3345 } 3346 3347 adapter->port[i] = ehea_setup_single_port(adapter, 3348 *dn_log_port_id, 3349 eth_dn); 3350 if (adapter->port[i]) 3351 netdev_info(adapter->port[i]->netdev, 3352 "logical port id #%d\n", *dn_log_port_id); 3353 else 3354 ehea_remove_adapter_mr(adapter); 3355 3356 i++; 3357 } 3358 return 0; 3359} 3360 3361static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter, 3362 u32 logical_port_id) 3363{ 3364 struct device_node *lhea_dn; 3365 struct device_node *eth_dn = NULL; 3366 const u32 *dn_log_port_id; 3367 3368 lhea_dn = adapter->ofdev->dev.of_node; 3369 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 3370 3371 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3372 NULL); 3373 if (dn_log_port_id) 3374 if (*dn_log_port_id == logical_port_id) 3375 return eth_dn; 3376 } 3377 3378 return NULL; 3379} 3380 3381static ssize_t ehea_probe_port(struct device *dev, 3382 struct device_attribute *attr, 3383 const char *buf, size_t count) 3384{ 3385 struct ehea_adapter *adapter = dev_get_drvdata(dev); 3386 struct ehea_port *port; 3387 struct device_node *eth_dn = NULL; 3388 int i; 3389 3390 u32 logical_port_id; 3391 3392 sscanf(buf, "%d", &logical_port_id); 3393 3394 port = ehea_get_port(adapter, logical_port_id); 3395 3396 if (port) { 3397 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", 3398 logical_port_id); 3399 return -EINVAL; 3400 } 3401 3402 eth_dn = ehea_get_eth_dn(adapter, logical_port_id); 3403 3404 if (!eth_dn) { 3405 pr_info("no logical port with id %d found\n", logical_port_id); 3406 return -EINVAL; 3407 } 3408 3409 if (ehea_add_adapter_mr(adapter)) { 3410 pr_err("creating MR failed\n"); 3411 return -EIO; 3412 } 3413 3414 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn); 3415 3416 of_node_put(eth_dn); 3417 3418 if (port) { 3419 for (i = 0; i < EHEA_MAX_PORTS; i++) 3420 if (!adapter->port[i]) { 3421 adapter->port[i] = port; 3422 break; 3423 } 3424 3425 netdev_info(port->netdev, "added: (logical port id=%d)\n", 3426 logical_port_id); 3427 } else { 3428 ehea_remove_adapter_mr(adapter); 3429 return -EIO; 3430 } 3431 3432 return (ssize_t) count; 3433} 3434 3435static ssize_t ehea_remove_port(struct device *dev, 3436 struct device_attribute *attr, 3437 const char *buf, size_t count) 3438{ 3439 struct ehea_adapter *adapter = dev_get_drvdata(dev); 3440 struct ehea_port *port; 3441 int i; 3442 u32 logical_port_id; 3443 3444 sscanf(buf, "%d", &logical_port_id); 3445 3446 port = ehea_get_port(adapter, logical_port_id); 3447 3448 if (port) { 3449 netdev_info(port->netdev, "removed: (logical port id=%d)\n", 3450 logical_port_id); 3451 3452 ehea_shutdown_single_port(port); 3453 3454 for (i = 0; i < EHEA_MAX_PORTS; i++) 3455 if (adapter->port[i] == port) { 3456 adapter->port[i] = NULL; 3457 break; 3458 } 3459 } else { 3460 pr_err("removing port with logical port id=%d failed. port not configured.\n", 3461 logical_port_id); 3462 return -EINVAL; 3463 } 3464 3465 ehea_remove_adapter_mr(adapter); 3466 3467 return (ssize_t) count; 3468} 3469 3470static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port); 3471static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port); 3472 3473int ehea_create_device_sysfs(struct platform_device *dev) 3474{ 3475 int ret = device_create_file(&dev->dev, &dev_attr_probe_port); 3476 if (ret) 3477 goto out; 3478 3479 ret = device_create_file(&dev->dev, &dev_attr_remove_port); 3480out: 3481 return ret; 3482} 3483 3484void ehea_remove_device_sysfs(struct platform_device *dev) 3485{ 3486 device_remove_file(&dev->dev, &dev_attr_probe_port); 3487 device_remove_file(&dev->dev, &dev_attr_remove_port); 3488} 3489 3490static int __devinit ehea_probe_adapter(struct platform_device *dev, 3491 const struct of_device_id *id) 3492{ 3493 struct ehea_adapter *adapter; 3494 const u64 *adapter_handle; 3495 int ret; 3496 3497 if (!dev || !dev->dev.of_node) { 3498 pr_err("Invalid ibmebus device probed\n"); 3499 return -EINVAL; 3500 } 3501 3502 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3503 if (!adapter) { 3504 ret = -ENOMEM; 3505 dev_err(&dev->dev, "no mem for ehea_adapter\n"); 3506 goto out; 3507 } 3508 3509 list_add(&adapter->list, &adapter_list); 3510 3511 adapter->ofdev = dev; 3512 3513 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle", 3514 NULL); 3515 if (adapter_handle) 3516 adapter->handle = *adapter_handle; 3517 3518 if (!adapter->handle) { 3519 dev_err(&dev->dev, "failed getting handle for adapter" 3520 " '%s'\n", dev->dev.of_node->full_name); 3521 ret = -ENODEV; 3522 goto out_free_ad; 3523 } 3524 3525 adapter->pd = EHEA_PD_ID; 3526 3527 dev_set_drvdata(&dev->dev, adapter); 3528 3529 3530 /* initialize adapter and ports */ 3531 /* get adapter properties */ 3532 ret = ehea_sense_adapter_attr(adapter); 3533 if (ret) { 3534 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret); 3535 goto out_free_ad; 3536 } 3537 3538 adapter->neq = ehea_create_eq(adapter, 3539 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); 3540 if (!adapter->neq) { 3541 ret = -EIO; 3542 dev_err(&dev->dev, "NEQ creation failed\n"); 3543 goto out_free_ad; 3544 } 3545 3546 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, 3547 (unsigned long)adapter); 3548 3549 ret = ibmebus_request_irq(adapter->neq->attr.ist1, 3550 ehea_interrupt_neq, IRQF_DISABLED, 3551 "ehea_neq", adapter); 3552 if (ret) { 3553 dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); 3554 goto out_kill_eq; 3555 } 3556 3557 ret = ehea_create_device_sysfs(dev); 3558 if (ret) 3559 goto out_free_irq; 3560 3561 ret = ehea_setup_ports(adapter); 3562 if (ret) { 3563 dev_err(&dev->dev, "setup_ports failed\n"); 3564 goto out_rem_dev_sysfs; 3565 } 3566 3567 ret = 0; 3568 goto out; 3569 3570out_rem_dev_sysfs: 3571 ehea_remove_device_sysfs(dev); 3572 3573out_free_irq: 3574 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3575 3576out_kill_eq: 3577 ehea_destroy_eq(adapter->neq); 3578 3579out_free_ad: 3580 list_del(&adapter->list); 3581 kfree(adapter); 3582 3583out: 3584 ehea_update_firmware_handles(); 3585 3586 return ret; 3587} 3588 3589static int __devexit ehea_remove(struct platform_device *dev) 3590{ 3591 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev); 3592 int i; 3593 3594 for (i = 0; i < EHEA_MAX_PORTS; i++) 3595 if (adapter->port[i]) { 3596 ehea_shutdown_single_port(adapter->port[i]); 3597 adapter->port[i] = NULL; 3598 } 3599 3600 ehea_remove_device_sysfs(dev); 3601 3602 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3603 tasklet_kill(&adapter->neq_tasklet); 3604 3605 ehea_destroy_eq(adapter->neq); 3606 ehea_remove_adapter_mr(adapter); 3607 list_del(&adapter->list); 3608 kfree(adapter); 3609 3610 ehea_update_firmware_handles(); 3611 3612 return 0; 3613} 3614 3615void ehea_crash_handler(void) 3616{ 3617 int i; 3618 3619 if (ehea_fw_handles.arr) 3620 for (i = 0; i < ehea_fw_handles.num_entries; i++) 3621 ehea_h_free_resource(ehea_fw_handles.arr[i].adh, 3622 ehea_fw_handles.arr[i].fwh, 3623 FORCE_FREE); 3624 3625 if (ehea_bcmc_regs.arr) 3626 for (i = 0; i < ehea_bcmc_regs.num_entries; i++) 3627 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, 3628 ehea_bcmc_regs.arr[i].port_id, 3629 ehea_bcmc_regs.arr[i].reg_type, 3630 ehea_bcmc_regs.arr[i].macaddr, 3631 0, H_DEREG_BCMC); 3632} 3633 3634static int ehea_mem_notifier(struct notifier_block *nb, 3635 unsigned long action, void *data) 3636{ 3637 int ret = NOTIFY_BAD; 3638 struct memory_notify *arg = data; 3639 3640 mutex_lock(&dlpar_mem_lock); 3641 3642 switch (action) { 3643 case MEM_CANCEL_OFFLINE: 3644 pr_info("memory offlining canceled"); 3645 /* Readd canceled memory block */ 3646 case MEM_ONLINE: 3647 pr_info("memory is going online"); 3648 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3649 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) 3650 goto out_unlock; 3651 ehea_rereg_mrs(); 3652 break; 3653 case MEM_GOING_OFFLINE: 3654 pr_info("memory is going offline"); 3655 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3656 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) 3657 goto out_unlock; 3658 ehea_rereg_mrs(); 3659 break; 3660 default: 3661 break; 3662 } 3663 3664 ehea_update_firmware_handles(); 3665 ret = NOTIFY_OK; 3666 3667out_unlock: 3668 mutex_unlock(&dlpar_mem_lock); 3669 return ret; 3670} 3671 3672static struct notifier_block ehea_mem_nb = { 3673 .notifier_call = ehea_mem_notifier, 3674}; 3675 3676static int ehea_reboot_notifier(struct notifier_block *nb, 3677 unsigned long action, void *unused) 3678{ 3679 if (action == SYS_RESTART) { 3680 pr_info("Reboot: freeing all eHEA resources\n"); 3681 ibmebus_unregister_driver(&ehea_driver); 3682 } 3683 return NOTIFY_DONE; 3684} 3685 3686static struct notifier_block ehea_reboot_nb = { 3687 .notifier_call = ehea_reboot_notifier, 3688}; 3689 3690static int check_module_parm(void) 3691{ 3692 int ret = 0; 3693 3694 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || 3695 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { 3696 pr_info("Bad parameter: rq1_entries\n"); 3697 ret = -EINVAL; 3698 } 3699 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || 3700 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { 3701 pr_info("Bad parameter: rq2_entries\n"); 3702 ret = -EINVAL; 3703 } 3704 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || 3705 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { 3706 pr_info("Bad parameter: rq3_entries\n"); 3707 ret = -EINVAL; 3708 } 3709 if ((sq_entries < EHEA_MIN_ENTRIES_QP) || 3710 (sq_entries > EHEA_MAX_ENTRIES_SQ)) { 3711 pr_info("Bad parameter: sq_entries\n"); 3712 ret = -EINVAL; 3713 } 3714 3715 return ret; 3716} 3717 3718static ssize_t ehea_show_capabilities(struct device_driver *drv, 3719 char *buf) 3720{ 3721 return sprintf(buf, "%d", EHEA_CAPABILITIES); 3722} 3723 3724static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH, 3725 ehea_show_capabilities, NULL); 3726 3727int __init ehea_module_init(void) 3728{ 3729 int ret; 3730 3731 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); 3732 3733 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); 3734 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); 3735 3736 mutex_init(&ehea_fw_handles.lock); 3737 spin_lock_init(&ehea_bcmc_regs.lock); 3738 3739 ret = check_module_parm(); 3740 if (ret) 3741 goto out; 3742 3743 ret = ehea_create_busmap(); 3744 if (ret) 3745 goto out; 3746 3747 ret = register_reboot_notifier(&ehea_reboot_nb); 3748 if (ret) 3749 pr_info("failed registering reboot notifier\n"); 3750 3751 ret = register_memory_notifier(&ehea_mem_nb); 3752 if (ret) 3753 pr_info("failed registering memory remove notifier\n"); 3754 3755 ret = crash_shutdown_register(ehea_crash_handler); 3756 if (ret) 3757 pr_info("failed registering crash handler\n"); 3758 3759 ret = ibmebus_register_driver(&ehea_driver); 3760 if (ret) { 3761 pr_err("failed registering eHEA device driver on ebus\n"); 3762 goto out2; 3763 } 3764 3765 ret = driver_create_file(&ehea_driver.driver, 3766 &driver_attr_capabilities); 3767 if (ret) { 3768 pr_err("failed to register capabilities attribute, ret=%d\n", 3769 ret); 3770 goto out3; 3771 } 3772 3773 return ret; 3774 3775out3: 3776 ibmebus_unregister_driver(&ehea_driver); 3777out2: 3778 unregister_memory_notifier(&ehea_mem_nb); 3779 unregister_reboot_notifier(&ehea_reboot_nb); 3780 crash_shutdown_unregister(ehea_crash_handler); 3781out: 3782 return ret; 3783} 3784 3785static void __exit ehea_module_exit(void) 3786{ 3787 int ret; 3788 3789 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3790 ibmebus_unregister_driver(&ehea_driver); 3791 unregister_reboot_notifier(&ehea_reboot_nb); 3792 ret = crash_shutdown_unregister(ehea_crash_handler); 3793 if (ret) 3794 pr_info("failed unregistering crash handler\n"); 3795 unregister_memory_notifier(&ehea_mem_nb); 3796 kfree(ehea_fw_handles.arr); 3797 kfree(ehea_bcmc_regs.arr); 3798 ehea_destroy_busmap(); 3799} 3800 3801module_init(ehea_module_init); 3802module_exit(ehea_module_exit);