Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'be2net-next'

Sathya Perla says:

====================
be2net: patch set

This patch set contains the following modifications:
* three patches (1/7 to 3/7) that fix indentation style issues
* convert the u8 vlan[] array to a bit-map to reduce memory usage
* use MCCQ instead of MBOX in be_cmd_rss_config() as the MCCQ is already
created by that time
* include rx-comp-error counter in ethtool stats
* remove the unused promiscuous setting from be_cmd_vlan_config()

Please apply to the net-next tree. Thanks!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+346 -348
+1 -1
drivers/net/ethernet/emulex/benet/be.h
··· 455 455 struct be_drv_stats drv_stats; 456 456 struct be_aic_obj aic_obj[MAX_EVT_QS]; 457 457 u16 vlans_added; 458 - u8 vlan_tag[VLAN_N_VID]; 458 + unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)]; 459 459 u8 vlan_prio_bmap; /* Available Priority BitMap */ 460 460 u16 recommended_prio; /* Recommended Priority */ 461 461 struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
+191 -174
drivers/net/ethernet/emulex/benet/be_cmds.c
··· 52 52 } 53 53 }; 54 54 55 - static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, 56 - u8 subsystem) 55 + static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) 57 56 { 58 57 int i; 59 58 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); ··· 196 197 197 198 /* Link state evt is a string of bytes; no need for endian swapping */ 198 199 static void be_async_link_state_process(struct be_adapter *adapter, 199 - struct be_async_event_link_state *evt) 200 + struct be_async_event_link_state *evt) 200 201 { 201 202 /* When link status changes, link speed must be re-queried from FW */ 202 203 adapter->phy.link_speed = -1; ··· 220 221 221 222 /* Grp5 CoS Priority evt */ 222 223 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 223 - struct be_async_event_grp5_cos_priority *evt) 224 + struct 225 + be_async_event_grp5_cos_priority 226 + *evt) 224 227 { 225 228 if (evt->valid) { 226 229 adapter->vlan_prio_bmap = evt->available_priority_bmap; ··· 234 233 235 234 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */ 236 235 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 237 - struct be_async_event_grp5_qos_link_speed *evt) 236 + struct 237 + be_async_event_grp5_qos_link_speed 238 + *evt) 238 239 { 239 240 if (adapter->phy.link_speed >= 0 && 240 241 evt->physical_port == adapter->port_num) ··· 245 242 246 243 /*Grp5 PVID evt*/ 247 244 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 248 - struct be_async_event_grp5_pvid_state *evt) 245 + struct 246 + be_async_event_grp5_pvid_state 247 + *evt) 249 248 { 250 249 if (evt->enabled) { 251 250 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; ··· 258 253 } 259 254 260 255 static void be_async_grp5_evt_process(struct be_adapter *adapter, 261 - u32 trailer, struct be_mcc_compl *evt) 256 + u32 trailer, struct be_mcc_compl *evt) 262 257 { 263 258 u8 event_type = 0; 264 259 ··· 286 281 } 287 
282 288 283 static void be_async_dbg_evt_process(struct be_adapter *adapter, 289 - u32 trailer, struct be_mcc_compl *cmp) 284 + u32 trailer, struct be_mcc_compl *cmp) 290 285 { 291 286 u8 event_type = 0; 292 287 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp; ··· 375 370 (struct be_async_event_link_state *) compl); 376 371 else if (is_grp5_evt(compl->flags)) 377 372 be_async_grp5_evt_process(adapter, 378 - compl->flags, compl); 373 + compl->flags, compl); 379 374 else if (is_dbg_evt(compl->flags)) 380 375 be_async_dbg_evt_process(adapter, 381 - compl->flags, compl); 376 + compl->flags, compl); 382 377 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 383 378 status = be_mcc_compl_process(adapter, compl); 384 379 atomic_dec(&mcc_obj->q.used); ··· 565 560 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 566 561 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 567 562 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 568 - sliport_err1 = ioread32(adapter->db + 569 - SLIPORT_ERROR1_OFFSET); 570 - sliport_err2 = ioread32(adapter->db + 571 - SLIPORT_ERROR2_OFFSET); 563 + sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); 564 + sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); 572 565 573 566 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && 574 567 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) ··· 633 630 if (stage == POST_STAGE_ARMFW_RDY) 634 631 return 0; 635 632 636 - dev_info(dev, "Waiting for POST, %ds elapsed\n", 637 - timeout); 633 + dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout); 638 634 if (msleep_interruptible(2000)) { 639 635 dev_err(dev, "Waiting for POST aborted\n"); 640 636 return -EINTR; ··· 651 649 return &wrb->payload.sgl[0]; 652 650 } 653 651 654 - static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, 655 - unsigned long addr) 652 + static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr) 656 653 { 657 654 wrb->tag0 = addr & 0xFFFFFFFF; 658 655 
wrb->tag1 = upper_32_bits(addr); ··· 660 659 /* Don't touch the hdr after it's prepared */ 661 660 /* mem will be NULL for embedded commands */ 662 661 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 663 - u8 subsystem, u8 opcode, int cmd_len, 664 - struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 662 + u8 subsystem, u8 opcode, int cmd_len, 663 + struct be_mcc_wrb *wrb, 664 + struct be_dma_mem *mem) 665 665 { 666 666 struct be_sge *sge; 667 667 ··· 685 683 } 686 684 687 685 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 688 - struct be_dma_mem *mem) 686 + struct be_dma_mem *mem) 689 687 { 690 688 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 691 689 u64 dma = (u64)mem->dma; ··· 870 868 req = embedded_payload(wrb); 871 869 872 870 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 873 - OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); 871 + OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, 872 + NULL); 874 873 875 874 /* Support for EQ_CREATEv2 available only SH-R onwards */ 876 875 if (!(BEx_chip(adapter) || lancer_chip(adapter))) ··· 920 917 req = embedded_payload(wrb); 921 918 922 919 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 923 - OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); 920 + OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, 921 + NULL); 924 922 req->type = MAC_ADDRESS_TYPE_NETWORK; 925 923 if (permanent) { 926 924 req->permanent = 1; ··· 944 940 945 941 /* Uses synchronous MCCQ */ 946 942 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 947 - u32 if_id, u32 *pmac_id, u32 domain) 943 + u32 if_id, u32 *pmac_id, u32 domain) 948 944 { 949 945 struct be_mcc_wrb *wrb; 950 946 struct be_cmd_req_pmac_add *req; ··· 960 956 req = embedded_payload(wrb); 961 957 962 958 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 963 - OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); 959 + OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, 960 + NULL); 964 
961 965 962 req->hdr.domain = domain; 966 963 req->if_id = cpu_to_le32(if_id); ··· 1017 1012 1018 1013 /* Uses Mbox */ 1019 1014 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 1020 - struct be_queue_info *eq, bool no_delay, int coalesce_wm) 1015 + struct be_queue_info *eq, bool no_delay, int coalesce_wm) 1021 1016 { 1022 1017 struct be_mcc_wrb *wrb; 1023 1018 struct be_cmd_req_cq_create *req; ··· 1033 1028 ctxt = &req->context; 1034 1029 1035 1030 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1036 - OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); 1031 + OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, 1032 + NULL); 1037 1033 1038 1034 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1039 1035 1040 1036 if (BEx_chip(adapter)) { 1041 1037 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 1042 - coalesce_wm); 1038 + coalesce_wm); 1043 1039 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 1044 - ctxt, no_delay); 1040 + ctxt, no_delay); 1045 1041 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 1046 - __ilog2_u32(cq->len/256)); 1042 + __ilog2_u32(cq->len / 256)); 1047 1043 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 1048 1044 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 1049 1045 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); ··· 1059 1053 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, 1060 1054 ctxt, coalesce_wm); 1061 1055 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1062 - no_delay); 1056 + no_delay); 1063 1057 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1064 - __ilog2_u32(cq->len/256)); 1058 + __ilog2_u32(cq->len / 256)); 1065 1059 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); 1066 - AMAP_SET_BITS(struct amap_cq_context_v2, eventable, 1067 - ctxt, 1); 1068 - AMAP_SET_BITS(struct amap_cq_context_v2, eqid, 1069 - ctxt, eq->id); 1060 + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); 1061 + 
AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); 1070 1062 } 1071 1063 1072 1064 be_dws_cpu_to_le(ctxt, sizeof(req->context)); ··· 1092 1088 } 1093 1089 1094 1090 static int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1095 - struct be_queue_info *mccq, 1096 - struct be_queue_info *cq) 1091 + struct be_queue_info *mccq, 1092 + struct be_queue_info *cq) 1097 1093 { 1098 1094 struct be_mcc_wrb *wrb; 1099 1095 struct be_cmd_req_mcc_ext_create *req; ··· 1109 1105 ctxt = &req->context; 1110 1106 1111 1107 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1112 - OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); 1108 + OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, 1109 + NULL); 1113 1110 1114 1111 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1115 1112 if (BEx_chip(adapter)) { 1116 1113 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1117 1114 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1118 - be_encoded_q_len(mccq->len)); 1115 + be_encoded_q_len(mccq->len)); 1119 1116 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1120 1117 } else { 1121 1118 req->hdr.version = 1; ··· 1150 1145 } 1151 1146 1152 1147 static int be_cmd_mccq_org_create(struct be_adapter *adapter, 1153 - struct be_queue_info *mccq, 1154 - struct be_queue_info *cq) 1148 + struct be_queue_info *mccq, 1149 + struct be_queue_info *cq) 1155 1150 { 1156 1151 struct be_mcc_wrb *wrb; 1157 1152 struct be_cmd_req_mcc_create *req; ··· 1167 1162 ctxt = &req->context; 1168 1163 1169 1164 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1170 - OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); 1165 + OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, 1166 + NULL); 1171 1167 1172 1168 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1173 1169 1174 1170 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1175 1171 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1176 - 
be_encoded_q_len(mccq->len)); 1172 + be_encoded_q_len(mccq->len)); 1177 1173 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1178 1174 1179 1175 be_dws_cpu_to_le(ctxt, sizeof(req->context)); ··· 1193 1187 } 1194 1188 1195 1189 int be_cmd_mccq_create(struct be_adapter *adapter, 1196 - struct be_queue_info *mccq, 1197 - struct be_queue_info *cq) 1190 + struct be_queue_info *mccq, struct be_queue_info *cq) 1198 1191 { 1199 1192 int status; 1200 1193 ··· 1218 1213 1219 1214 req = embedded_payload(&wrb); 1220 1215 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1221 - OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); 1216 + OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); 1222 1217 1223 1218 if (lancer_chip(adapter)) { 1224 1219 req->hdr.version = 1; ··· 1255 1250 1256 1251 /* Uses MCC */ 1257 1252 int be_cmd_rxq_create(struct be_adapter *adapter, 1258 - struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1259 - u32 if_id, u32 rss, u8 *rss_id) 1253 + struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1254 + u32 if_id, u32 rss, u8 *rss_id) 1260 1255 { 1261 1256 struct be_mcc_wrb *wrb; 1262 1257 struct be_cmd_req_eth_rx_create *req; ··· 1273 1268 req = embedded_payload(wrb); 1274 1269 1275 1270 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1276 - OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1271 + OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1277 1272 1278 1273 req->cq_id = cpu_to_le16(cq_id); 1279 1274 req->frag_size = fls(frag_size) - 1; ··· 1300 1295 * Uses Mbox 1301 1296 */ 1302 1297 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1303 - int queue_type) 1298 + int queue_type) 1304 1299 { 1305 1300 struct be_mcc_wrb *wrb; 1306 1301 struct be_cmd_req_q_destroy *req; ··· 1339 1334 } 1340 1335 1341 1336 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1342 - NULL); 1337 + NULL); 1343 1338 req->id = cpu_to_le16(q->id); 1344 1339 1345 1340 status = be_mbox_notify_wait(adapter); ··· 1366 1361 
req = embedded_payload(wrb); 1367 1362 1368 1363 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1369 - OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1364 + OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1370 1365 req->id = cpu_to_le16(q->id); 1371 1366 1372 1367 status = be_mcc_notify_wait(adapter); ··· 1389 1384 1390 1385 req = embedded_payload(&wrb); 1391 1386 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1392 - OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL); 1387 + OPCODE_COMMON_NTWK_INTERFACE_CREATE, 1388 + sizeof(*req), &wrb, NULL); 1393 1389 req->hdr.domain = domain; 1394 1390 req->capability_flags = cpu_to_le32(cap_flags); 1395 1391 req->enable_flags = cpu_to_le32(en_flags); ··· 1428 1422 req = embedded_payload(wrb); 1429 1423 1430 1424 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1431 - OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); 1425 + OPCODE_COMMON_NTWK_INTERFACE_DESTROY, 1426 + sizeof(*req), wrb, NULL); 1432 1427 req->hdr.domain = domain; 1433 1428 req->interface_id = cpu_to_le32(interface_id); 1434 1429 ··· 1459 1452 hdr = nonemb_cmd->va; 1460 1453 1461 1454 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1462 - OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); 1455 + OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, 1456 + nonemb_cmd); 1463 1457 1464 1458 /* version 1 of the cmd is not supported only by BE2 */ 1465 1459 if (BE2_chip(adapter)) ··· 1480 1472 1481 1473 /* Lancer Stats */ 1482 1474 int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1483 - struct be_dma_mem *nonemb_cmd) 1475 + struct be_dma_mem *nonemb_cmd) 1484 1476 { 1485 1477 1486 1478 struct be_mcc_wrb *wrb; ··· 1501 1493 req = nonemb_cmd->va; 1502 1494 1503 1495 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1504 - OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, 1505 - nonemb_cmd); 1496 + OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, 1497 + wrb, nonemb_cmd); 1506 1498 1507 1499 
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); 1508 1500 req->cmd_params.params.reset_stats = 0; ··· 1561 1553 req = embedded_payload(wrb); 1562 1554 1563 1555 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1564 - OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); 1556 + OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, 1557 + sizeof(*req), wrb, NULL); 1565 1558 1566 1559 /* version 1 of the cmd is not supported only by BE2 */ 1567 1560 if (!BE2_chip(adapter)) ··· 1607 1598 req = embedded_payload(wrb); 1608 1599 1609 1600 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1610 - OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), 1611 - wrb, NULL); 1601 + OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, 1602 + sizeof(*req), wrb, NULL); 1612 1603 1613 1604 be_mcc_notify(adapter); 1614 1605 ··· 1634 1625 req = embedded_payload(wrb); 1635 1626 1636 1627 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1637 - OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); 1628 + OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, 1629 + NULL); 1638 1630 req->fat_operation = cpu_to_le32(QUERY_FAT); 1639 1631 status = be_mcc_notify_wait(adapter); 1640 1632 if (!status) { ··· 1665 1655 1666 1656 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1667 1657 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, 1668 - get_fat_cmd.size, 1669 - &get_fat_cmd.dma); 1658 + get_fat_cmd.size, 1659 + &get_fat_cmd.dma); 1670 1660 if (!get_fat_cmd.va) { 1671 1661 status = -ENOMEM; 1672 1662 dev_err(&adapter->pdev->dev, ··· 1689 1679 1690 1680 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; 1691 1681 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1692 - OPCODE_COMMON_MANAGE_FAT, payload_len, wrb, 1693 - &get_fat_cmd); 1682 + OPCODE_COMMON_MANAGE_FAT, payload_len, 1683 + wrb, &get_fat_cmd); 1694 1684 1695 1685 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); 1696 1686 req->read_log_offset = cpu_to_le32(log_offset); ··· 1701 
1691 if (!status) { 1702 1692 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; 1703 1693 memcpy(buf + offset, 1704 - resp->data_buffer, 1705 - le32_to_cpu(resp->read_log_length)); 1694 + resp->data_buffer, 1695 + le32_to_cpu(resp->read_log_length)); 1706 1696 } else { 1707 1697 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); 1708 1698 goto err; ··· 1712 1702 } 1713 1703 err: 1714 1704 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1715 - get_fat_cmd.va, 1716 - get_fat_cmd.dma); 1705 + get_fat_cmd.va, get_fat_cmd.dma); 1717 1706 spin_unlock_bh(&adapter->mcc_lock); 1718 1707 } 1719 1708 1720 1709 /* Uses synchronous mcc */ 1721 1710 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, 1722 - char *fw_on_flash) 1711 + char *fw_on_flash) 1723 1712 { 1724 1713 struct be_mcc_wrb *wrb; 1725 1714 struct be_cmd_req_get_fw_version *req; ··· 1735 1726 req = embedded_payload(wrb); 1736 1727 1737 1728 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1738 - OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL); 1729 + OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, 1730 + NULL); 1739 1731 status = be_mcc_notify_wait(adapter); 1740 1732 if (!status) { 1741 1733 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); ··· 1769 1759 req = embedded_payload(wrb); 1770 1760 1771 1761 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1772 - OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); 1762 + OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, 1763 + NULL); 1773 1764 1774 1765 req->num_eq = cpu_to_le32(num); 1775 1766 for (i = 0; i < num; i++) { ··· 1788 1777 1789 1778 /* Uses sycnhronous mcc */ 1790 1779 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1791 - u32 num, bool promiscuous) 1780 + u32 num) 1792 1781 { 1793 1782 struct be_mcc_wrb *wrb; 1794 1783 struct be_cmd_req_vlan_config *req; ··· 1804 1793 req = embedded_payload(wrb); 1805 1794 1806 1795 be_wrb_cmd_hdr_prepare(&req->hdr, 
CMD_SUBSYSTEM_COMMON, 1807 - OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); 1796 + OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1797 + wrb, NULL); 1808 1798 1809 1799 req->interface_id = if_id; 1810 - req->promiscuous = promiscuous; 1811 1800 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1812 1801 req->num_vlan = num; 1813 - if (!promiscuous) { 1814 - memcpy(req->normal_vlan, vtag_array, 1815 - req->num_vlan * sizeof(vtag_array[0])); 1816 - } 1802 + memcpy(req->normal_vlan, vtag_array, 1803 + req->num_vlan * sizeof(vtag_array[0])); 1817 1804 1818 1805 status = be_mcc_notify_wait(adapter); 1819 - 1820 1806 err: 1821 1807 spin_unlock_bh(&adapter->mcc_lock); 1822 1808 return status; ··· 1835 1827 } 1836 1828 memset(req, 0, sizeof(*req)); 1837 1829 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1838 - OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), 1839 - wrb, mem); 1830 + OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), 1831 + wrb, mem); 1840 1832 1841 1833 req->if_id = cpu_to_le32(adapter->if_handle); 1842 1834 if (flags & IFF_PROMISC) { 1843 1835 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1844 - BE_IF_FLAGS_VLAN_PROMISCUOUS | 1845 - BE_IF_FLAGS_MCAST_PROMISCUOUS); 1836 + BE_IF_FLAGS_VLAN_PROMISCUOUS | 1837 + BE_IF_FLAGS_MCAST_PROMISCUOUS); 1846 1838 if (value == ON) 1847 - req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1848 - BE_IF_FLAGS_VLAN_PROMISCUOUS | 1849 - BE_IF_FLAGS_MCAST_PROMISCUOUS); 1839 + req->if_flags = 1840 + cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1841 + BE_IF_FLAGS_VLAN_PROMISCUOUS | 1842 + BE_IF_FLAGS_MCAST_PROMISCUOUS); 1850 1843 } else if (flags & IFF_ALLMULTI) { 1851 1844 req->if_flags_mask = req->if_flags = 1852 1845 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); ··· 1876 1867 } 1877 1868 1878 1869 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != 1879 - req->if_flags_mask) { 1870 + req->if_flags_mask) { 1880 1871 dev_warn(&adapter->pdev->dev, 1881 1872 "Cannot set rx 
filter flags 0x%x\n", 1882 1873 req->if_flags_mask); ··· 1914 1905 req = embedded_payload(wrb); 1915 1906 1916 1907 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1917 - OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1908 + OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), 1909 + wrb, NULL); 1918 1910 1919 1911 req->tx_flow_control = cpu_to_le16((u16)tx_fc); 1920 1912 req->rx_flow_control = cpu_to_le16((u16)rx_fc); ··· 1948 1938 req = embedded_payload(wrb); 1949 1939 1950 1940 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1951 - OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 1941 + OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), 1942 + wrb, NULL); 1952 1943 1953 1944 status = be_mcc_notify_wait(adapter); 1954 1945 if (!status) { ··· 1979 1968 req = embedded_payload(wrb); 1980 1969 1981 1970 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1982 - OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); 1971 + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, 1972 + sizeof(*req), wrb, NULL); 1983 1973 1984 1974 status = be_mbox_notify_wait(adapter); 1985 1975 if (!status) { ··· 2023 2011 req = embedded_payload(wrb); 2024 2012 2025 2013 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 2026 - OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); 2014 + OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, 2015 + NULL); 2027 2016 2028 2017 status = be_mbox_notify_wait(adapter); 2029 2018 ··· 2033 2020 } 2034 2021 2035 2022 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 2036 - u32 rss_hash_opts, u16 table_size, u8 *rss_hkey) 2023 + u32 rss_hash_opts, u16 table_size, u8 *rss_hkey) 2037 2024 { 2038 2025 struct be_mcc_wrb *wrb; 2039 2026 struct be_cmd_req_rss_config *req; ··· 2042 2029 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) 2043 2030 return 0; 2044 2031 2045 - if (mutex_lock_interruptible(&adapter->mbox_lock)) 2046 - return -1; 2032 + spin_lock_bh(&adapter->mcc_lock); 2047 2033 2048 - wrb = 
wrb_from_mbox(adapter); 2034 + wrb = wrb_from_mccq(adapter); 2035 + if (!wrb) { 2036 + status = -EBUSY; 2037 + goto err; 2038 + } 2049 2039 req = embedded_payload(wrb); 2050 2040 2051 2041 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2052 - OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 2042 + OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 2053 2043 2054 2044 req->if_id = cpu_to_le32(adapter->if_handle); 2055 2045 req->enable_rss = cpu_to_le16(rss_hash_opts); 2056 2046 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 2057 2047 2058 - if (lancer_chip(adapter) || skyhawk_chip(adapter)) 2048 + if (!BEx_chip(adapter)) 2059 2049 req->hdr.version = 1; 2060 2050 2061 2051 memcpy(req->cpu_table, rsstable, table_size); 2062 2052 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN); 2063 2053 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 2064 2054 2065 - status = be_mbox_notify_wait(adapter); 2066 - 2067 - mutex_unlock(&adapter->mbox_lock); 2055 + status = be_mcc_notify_wait(adapter); 2056 + err: 2057 + spin_unlock_bh(&adapter->mcc_lock); 2068 2058 return status; 2069 2059 } 2070 2060 2071 2061 /* Uses sync mcc */ 2072 2062 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2073 - u8 bcn, u8 sts, u8 state) 2063 + u8 bcn, u8 sts, u8 state) 2074 2064 { 2075 2065 struct be_mcc_wrb *wrb; 2076 2066 struct be_cmd_req_enable_disable_beacon *req; ··· 2089 2073 req = embedded_payload(wrb); 2090 2074 2091 2075 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2092 - OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); 2076 + OPCODE_COMMON_ENABLE_DISABLE_BEACON, 2077 + sizeof(*req), wrb, NULL); 2093 2078 2094 2079 req->port_num = port_num; 2095 2080 req->beacon_state = state; ··· 2121 2104 req = embedded_payload(wrb); 2122 2105 2123 2106 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2124 - OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL); 2107 + OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), 2108 + wrb, NULL); 2125 
2109 2126 2110 req->port_num = port_num; 2127 2111 ··· 2161 2143 req = embedded_payload(wrb); 2162 2144 2163 2145 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2164 - OPCODE_COMMON_WRITE_OBJECT, 2165 - sizeof(struct lancer_cmd_req_write_object), wrb, 2166 - NULL); 2146 + OPCODE_COMMON_WRITE_OBJECT, 2147 + sizeof(struct lancer_cmd_req_write_object), wrb, 2148 + NULL); 2167 2149 2168 2150 ctxt = &req->context; 2169 2151 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2170 - write_length, ctxt, data_size); 2152 + write_length, ctxt, data_size); 2171 2153 2172 2154 if (data_size == 0) 2173 2155 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2174 - eof, ctxt, 1); 2156 + eof, ctxt, 1); 2175 2157 else 2176 2158 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2177 - eof, ctxt, 0); 2159 + eof, ctxt, 0); 2178 2160 2179 2161 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2180 2162 req->write_offset = cpu_to_le32(data_offset); ··· 2182 2164 req->descriptor_count = cpu_to_le32(1); 2183 2165 req->buf_len = cpu_to_le32(data_size); 2184 2166 req->addr_low = cpu_to_le32((cmd->dma + 2185 - sizeof(struct lancer_cmd_req_write_object)) 2186 - & 0xFFFFFFFF); 2167 + sizeof(struct lancer_cmd_req_write_object)) 2168 + & 0xFFFFFFFF); 2187 2169 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2188 2170 sizeof(struct lancer_cmd_req_write_object))); 2189 2171 ··· 2212 2194 } 2213 2195 2214 2196 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2215 - u32 data_size, u32 data_offset, const char *obj_name, 2216 - u32 *data_read, u32 *eof, u8 *addn_status) 2197 + u32 data_size, u32 data_offset, const char *obj_name, 2198 + u32 *data_read, u32 *eof, u8 *addn_status) 2217 2199 { 2218 2200 struct be_mcc_wrb *wrb; 2219 2201 struct lancer_cmd_req_read_object *req; ··· 2231 2213 req = embedded_payload(wrb); 2232 2214 2233 2215 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2234 - OPCODE_COMMON_READ_OBJECT, 2235 - sizeof(struct 
lancer_cmd_req_read_object), wrb, 2236 - NULL); 2216 + OPCODE_COMMON_READ_OBJECT, 2217 + sizeof(struct lancer_cmd_req_read_object), wrb, 2218 + NULL); 2237 2219 2238 2220 req->desired_read_len = cpu_to_le32(data_size); 2239 2221 req->read_offset = cpu_to_le32(data_offset); ··· 2259 2241 } 2260 2242 2261 2243 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2262 - u32 flash_type, u32 flash_opcode, u32 buf_size) 2244 + u32 flash_type, u32 flash_opcode, u32 buf_size) 2263 2245 { 2264 2246 struct be_mcc_wrb *wrb; 2265 2247 struct be_cmd_write_flashrom *req; ··· 2276 2258 req = cmd->va; 2277 2259 2278 2260 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2279 - OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd); 2261 + OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, 2262 + cmd); 2280 2263 2281 2264 req->params.op_type = cpu_to_le32(flash_type); 2282 2265 req->params.op_code = cpu_to_le32(flash_opcode); ··· 2334 2315 } 2335 2316 2336 2317 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2337 - struct be_dma_mem *nonemb_cmd) 2318 + struct be_dma_mem *nonemb_cmd) 2338 2319 { 2339 2320 struct be_mcc_wrb *wrb; 2340 2321 struct be_cmd_req_acpi_wol_magic_config *req; ··· 2350 2331 req = nonemb_cmd->va; 2351 2332 2352 2333 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2353 - OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb, 2354 - nonemb_cmd); 2334 + OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), 2335 + wrb, nonemb_cmd); 2355 2336 memcpy(req->magic_mac, mac, ETH_ALEN); 2356 2337 2357 2338 status = be_mcc_notify_wait(adapter); ··· 2379 2360 req = embedded_payload(wrb); 2380 2361 2381 2362 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2382 - OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb, 2383 - NULL); 2363 + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), 2364 + wrb, NULL); 2384 2365 2385 2366 req->src_port = port_num; 2386 2367 req->dest_port = port_num; ··· 2394 2375 } 2395 2376 2396 2377 int 
be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 2397 - u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) 2378 + u32 loopback_type, u32 pkt_size, u32 num_pkts, 2379 + u64 pattern) 2398 2380 { 2399 2381 struct be_mcc_wrb *wrb; 2400 2382 struct be_cmd_req_loopback_test *req; ··· 2413 2393 req = embedded_payload(wrb); 2414 2394 2415 2395 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2416 - OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2396 + OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, 2397 + NULL); 2417 2398 2418 2399 req->hdr.timeout = cpu_to_le32(15); 2419 2400 req->pattern = cpu_to_le64(pattern); ··· 2439 2418 } 2440 2419 2441 2420 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 2442 - u32 byte_cnt, struct be_dma_mem *cmd) 2421 + u32 byte_cnt, struct be_dma_mem *cmd) 2443 2422 { 2444 2423 struct be_mcc_wrb *wrb; 2445 2424 struct be_cmd_req_ddrdma_test *req; ··· 2455 2434 } 2456 2435 req = cmd->va; 2457 2436 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2458 - OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd); 2437 + OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, 2438 + cmd); 2459 2439 2460 2440 req->pattern = cpu_to_le64(pattern); 2461 2441 req->byte_count = cpu_to_le32(byte_cnt); ··· 2484 2462 } 2485 2463 2486 2464 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 2487 - struct be_dma_mem *nonemb_cmd) 2465 + struct be_dma_mem *nonemb_cmd) 2488 2466 { 2489 2467 struct be_mcc_wrb *wrb; 2490 2468 struct be_cmd_req_seeprom_read *req; ··· 2500 2478 req = nonemb_cmd->va; 2501 2479 2502 2480 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2503 - OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2504 - nonemb_cmd); 2481 + OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 2482 + nonemb_cmd); 2505 2483 2506 2484 status = be_mcc_notify_wait(adapter); 2507 2485 ··· 2529 2507 goto err; 2530 2508 } 2531 2509 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 2532 - cmd.va = 
pci_alloc_consistent(adapter->pdev, cmd.size, 2533 - &cmd.dma); 2510 + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2534 2511 if (!cmd.va) { 2535 2512 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 2536 2513 status = -ENOMEM; ··· 2539 2518 req = cmd.va; 2540 2519 2541 2520 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2542 - OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2543 - wrb, &cmd); 2521 + OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 2522 + wrb, &cmd); 2544 2523 2545 2524 status = be_mcc_notify_wait(adapter); 2546 2525 if (!status) { ··· 2562 2541 BE_SUPPORTED_SPEED_1GBPS; 2563 2542 } 2564 2543 } 2565 - pci_free_consistent(adapter->pdev, cmd.size, 2566 - cmd.va, cmd.dma); 2544 + pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2567 2545 err: 2568 2546 spin_unlock_bh(&adapter->mcc_lock); 2569 2547 return status; ··· 2585 2565 req = embedded_payload(wrb); 2586 2566 2587 2567 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2588 - OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2568 + OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 2589 2569 2590 2570 req->hdr.domain = domain; 2591 2571 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); ··· 2614 2594 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 2615 2595 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 2616 2596 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, 2617 - &attribs_cmd.dma); 2597 + &attribs_cmd.dma); 2618 2598 if (!attribs_cmd.va) { 2619 - dev_err(&adapter->pdev->dev, 2620 - "Memory allocation failure\n"); 2599 + dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 2621 2600 status = -ENOMEM; 2622 2601 goto err; 2623 2602 } ··· 2629 2610 req = attribs_cmd.va; 2630 2611 2631 2612 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2632 - OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb, 2633 - &attribs_cmd); 2613 + OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, 2614 + wrb, &attribs_cmd); 
2634 2615 2635 2616 status = be_mbox_notify_wait(adapter); 2636 2617 if (!status) { ··· 2665 2646 req = embedded_payload(wrb); 2666 2647 2667 2648 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2668 - OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL); 2649 + OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, 2650 + sizeof(*req), wrb, NULL); 2669 2651 2670 2652 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 2671 2653 CAPABILITY_BE3_NATIVE_ERX_API); ··· 2779 2759 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 2780 2760 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 2781 2761 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, 2782 - get_mac_list_cmd.size, 2783 - &get_mac_list_cmd.dma); 2762 + get_mac_list_cmd.size, 2763 + &get_mac_list_cmd.dma); 2784 2764 2785 2765 if (!get_mac_list_cmd.va) { 2786 2766 dev_err(&adapter->pdev->dev, 2787 - "Memory allocation failure during GET_MAC_LIST\n"); 2767 + "Memory allocation failure during GET_MAC_LIST\n"); 2788 2768 return -ENOMEM; 2789 2769 } 2790 2770 ··· 2848 2828 /* If no active mac_id found, return first mac addr */ 2849 2829 *pmac_id_valid = false; 2850 2830 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 2851 - ETH_ALEN); 2831 + ETH_ALEN); 2852 2832 } 2853 2833 2854 2834 out: 2855 2835 spin_unlock_bh(&adapter->mcc_lock); 2856 2836 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, 2857 - get_mac_list_cmd.va, get_mac_list_cmd.dma); 2837 + get_mac_list_cmd.va, get_mac_list_cmd.dma); 2858 2838 return status; 2859 2839 } 2860 2840 2861 - int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac, 2862 - u32 if_handle, bool active, u32 domain) 2841 + int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, 2842 + u8 *mac, u32 if_handle, bool active, u32 domain) 2863 2843 { 2864 2844 2865 2845 if (!active) ··· 2909 2889 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2910 2890 cmd.size = sizeof(struct 
be_cmd_req_set_mac_list); 2911 2891 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 2912 - &cmd.dma, GFP_KERNEL); 2892 + &cmd.dma, GFP_KERNEL); 2913 2893 if (!cmd.va) 2914 2894 return -ENOMEM; 2915 2895 ··· 2923 2903 2924 2904 req = cmd.va; 2925 2905 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2926 - OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 2927 - wrb, &cmd); 2906 + OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 2907 + wrb, &cmd); 2928 2908 2929 2909 req->hdr.domain = domain; 2930 2910 req->mac_count = mac_count; ··· 2934 2914 status = be_mcc_notify_wait(adapter); 2935 2915 2936 2916 err: 2937 - dma_free_coherent(&adapter->pdev->dev, cmd.size, 2938 - cmd.va, cmd.dma); 2917 + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 2939 2918 spin_unlock_bh(&adapter->mcc_lock); 2940 2919 return status; 2941 2920 } ··· 2979 2960 ctxt = &req->context; 2980 2961 2981 2962 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2982 - OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); 2963 + OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, 2964 + NULL); 2983 2965 2984 2966 req->hdr.domain = domain; 2985 2967 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); ··· 3026 3006 ctxt = &req->context; 3027 3007 3028 3008 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3029 - OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); 3009 + OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, 3010 + NULL); 3030 3011 3031 3012 req->hdr.domain = domain; 3032 3013 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ··· 3045 3024 if (!status) { 3046 3025 struct be_cmd_resp_get_hsw_config *resp = 3047 3026 embedded_payload(wrb); 3048 - be_dws_le_to_cpu(&resp->context, 3049 - sizeof(resp->context)); 3027 + be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 3050 3028 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3051 - pvid, &resp->context); 3029 + pvid, &resp->context); 3052 3030 if (pvid) 3053 3031 *pvid = 
le16_to_cpu(vid); 3054 3032 if (mode) ··· 3079 3059 3080 3060 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3081 3061 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 3082 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 3083 - &cmd.dma); 3062 + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3084 3063 if (!cmd.va) { 3085 - dev_err(&adapter->pdev->dev, 3086 - "Memory allocation failure\n"); 3064 + dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3087 3065 status = -ENOMEM; 3088 3066 goto err; 3089 3067 } ··· 3364 3346 3365 3347 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3366 3348 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 3367 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 3368 - &cmd.dma); 3349 + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 3369 3350 if (!cmd.va) { 3370 3351 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3371 3352 status = -ENOMEM; ··· 3410 3393 3411 3394 /* Uses mbox */ 3412 3395 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, 3413 - u8 domain, struct be_dma_mem *cmd) 3396 + u8 domain, struct be_dma_mem *cmd) 3414 3397 { 3415 3398 struct be_mcc_wrb *wrb; 3416 3399 struct be_cmd_req_get_profile_config *req; ··· 3438 3421 3439 3422 /* Uses sync mcc */ 3440 3423 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter, 3441 - u8 domain, struct be_dma_mem *cmd) 3424 + u8 domain, struct be_dma_mem *cmd) 3442 3425 { 3443 3426 struct be_mcc_wrb *wrb; 3444 3427 struct be_cmd_req_get_profile_config *req; ··· 3498 3481 resp = cmd.va; 3499 3482 desc_count = le32_to_cpu(resp->desc_count); 3500 3483 3501 - pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3502 - desc_count); 3484 + pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, 3485 + desc_count); 3503 3486 if (pcie) 3504 3487 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3505 3488 ··· 3873 3856 } 3874 3857 3875 3858 int be_roce_mcc_cmd(void 
*netdev_handle, void *wrb_payload, 3876 - int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3859 + int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3877 3860 { 3878 3861 struct be_adapter *adapter = netdev_priv(netdev_handle); 3879 3862 struct be_mcc_wrb *wrb;
+1 -1
drivers/net/ethernet/emulex/benet/be_cmds.h
··· 2060 2060 char *fw_on_flash); 2061 2061 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 2062 2062 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 2063 - u32 num, bool promiscuous); 2063 + u32 num); 2064 2064 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 2065 2065 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 2066 2066 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+43 -58
drivers/net/ethernet/emulex/benet/be_ethtool.c
··· 132 132 {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ 133 133 {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ 134 134 {DRVSTAT_RX_INFO(rx_compl)}, 135 + {DRVSTAT_RX_INFO(rx_compl_err)}, 135 136 {DRVSTAT_RX_INFO(rx_mcast_pkts)}, 136 137 /* Number of page allocation failures while posting receive buffers 137 138 * to HW. ··· 182 181 #define BE_NO_LOOPBACK 0xff 183 182 184 183 static void be_get_drvinfo(struct net_device *netdev, 185 - struct ethtool_drvinfo *drvinfo) 184 + struct ethtool_drvinfo *drvinfo) 186 185 { 187 186 struct be_adapter *adapter = netdev_priv(netdev); 188 187 ··· 202 201 drvinfo->eedump_len = 0; 203 202 } 204 203 205 - static u32 206 - lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) 204 + static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) 207 205 { 208 206 u32 data_read = 0, eof; 209 207 u8 addn_status; ··· 212 212 memset(&data_len_cmd, 0, sizeof(data_len_cmd)); 213 213 /* data_offset and data_size should be 0 to get reg len */ 214 214 status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, 215 - file_name, &data_read, &eof, &addn_status); 215 + file_name, &data_read, &eof, 216 + &addn_status); 216 217 217 218 return data_read; 218 219 } 219 220 220 - static int 221 - lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, 222 - u32 buf_len, void *buf) 221 + static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, 222 + u32 buf_len, void *buf) 223 223 { 224 224 struct be_dma_mem read_cmd; 225 225 u32 read_len = 0, total_read_len = 0, chunk_size; ··· 229 229 230 230 read_cmd.size = LANCER_READ_FILE_CHUNK; 231 231 read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, 232 - &read_cmd.dma); 232 + &read_cmd.dma); 233 233 234 234 if (!read_cmd.va) { 235 235 dev_err(&adapter->pdev->dev, 236 - "Memory allocation failure while reading dump\n"); 236 + "Memory allocation failure while reading dump\n"); 237 237 return 
-ENOMEM; 238 238 } 239 239 ··· 242 242 LANCER_READ_FILE_CHUNK); 243 243 chunk_size = ALIGN(chunk_size, 4); 244 244 status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, 245 - total_read_len, file_name, &read_len, 246 - &eof, &addn_status); 245 + total_read_len, file_name, 246 + &read_len, &eof, &addn_status); 247 247 if (!status) { 248 248 memcpy(buf + total_read_len, read_cmd.va, read_len); 249 249 total_read_len += read_len; ··· 254 254 } 255 255 } 256 256 pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, 257 - read_cmd.dma); 257 + read_cmd.dma); 258 258 259 259 return status; 260 260 } 261 261 262 - static int 263 - be_get_reg_len(struct net_device *netdev) 262 + static int be_get_reg_len(struct net_device *netdev) 264 263 { 265 264 struct be_adapter *adapter = netdev_priv(netdev); 266 265 u32 log_size = 0; ··· 270 271 if (be_physfn(adapter)) { 271 272 if (lancer_chip(adapter)) 272 273 log_size = lancer_cmd_get_file_len(adapter, 273 - LANCER_FW_DUMP_FILE); 274 + LANCER_FW_DUMP_FILE); 274 275 else 275 276 be_cmd_get_reg_len(adapter, &log_size); 276 277 } ··· 286 287 memset(buf, 0, regs->len); 287 288 if (lancer_chip(adapter)) 288 289 lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, 289 - regs->len, buf); 290 + regs->len, buf); 290 291 else 291 292 be_cmd_get_regs(adapter, regs->len, buf); 292 293 } ··· 336 337 return 0; 337 338 } 338 339 339 - static void 340 - be_get_ethtool_stats(struct net_device *netdev, 341 - struct ethtool_stats *stats, uint64_t *data) 340 + static void be_get_ethtool_stats(struct net_device *netdev, 341 + struct ethtool_stats *stats, uint64_t *data) 342 342 { 343 343 struct be_adapter *adapter = netdev_priv(netdev); 344 344 struct be_rx_obj *rxo; ··· 388 390 } 389 391 } 390 392 391 - static void 392 - be_get_stat_strings(struct net_device *netdev, uint32_t stringset, 393 - uint8_t *data) 393 + static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset, 394 + uint8_t *data) 394 395 { 395 396 
struct be_adapter *adapter = netdev_priv(netdev); 396 397 int i, j; ··· 639 642 adapter->rx_fc = ecmd->rx_pause; 640 643 641 644 status = be_cmd_set_flow_control(adapter, 642 - adapter->tx_fc, adapter->rx_fc); 645 + adapter->tx_fc, adapter->rx_fc); 643 646 if (status) 644 647 dev_warn(&adapter->pdev->dev, "Pause param set failed.\n"); 645 648 646 649 return status; 647 650 } 648 651 649 - static int 650 - be_set_phys_id(struct net_device *netdev, 651 - enum ethtool_phys_id_state state) 652 + static int be_set_phys_id(struct net_device *netdev, 653 + enum ethtool_phys_id_state state) 652 654 { 653 655 struct be_adapter *adapter = netdev_priv(netdev); 654 656 ··· 704 708 return status; 705 709 } 706 710 707 - static void 708 - be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 711 + static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 709 712 { 710 713 struct be_adapter *adapter = netdev_priv(netdev); 711 714 ··· 718 723 memset(&wol->sopass, 0, sizeof(wol->sopass)); 719 724 } 720 725 721 - static int 722 - be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 726 + static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 723 727 { 724 728 struct be_adapter *adapter = netdev_priv(netdev); 725 729 ··· 738 744 return 0; 739 745 } 740 746 741 - static int 742 - be_test_ddr_dma(struct be_adapter *adapter) 747 + static int be_test_ddr_dma(struct be_adapter *adapter) 743 748 { 744 749 int ret, i; 745 750 struct be_dma_mem ddrdma_cmd; ··· 754 761 755 762 for (i = 0; i < 2; i++) { 756 763 ret = be_cmd_ddr_dma_test(adapter, pattern[i], 757 - 4096, &ddrdma_cmd); 764 + 4096, &ddrdma_cmd); 758 765 if (ret != 0) 759 766 goto err; 760 767 } ··· 766 773 } 767 774 768 775 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, 769 - u64 *status) 776 + u64 *status) 770 777 { 771 - be_cmd_set_loopback(adapter, adapter->hba_port_num, 772 - loopback_type, 1); 778 + be_cmd_set_loopback(adapter, 
adapter->hba_port_num, loopback_type, 1); 773 779 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, 774 - loopback_type, 1500, 775 - 2, 0xabc); 776 - be_cmd_set_loopback(adapter, adapter->hba_port_num, 777 - BE_NO_LOOPBACK, 1); 780 + loopback_type, 1500, 2, 0xabc); 781 + be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); 778 782 return *status; 779 783 } 780 784 781 - static void 782 - be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 785 + static void be_self_test(struct net_device *netdev, struct ethtool_test *test, 786 + u64 *data) 783 787 { 784 788 struct be_adapter *adapter = netdev_priv(netdev); 785 789 int status; ··· 791 801 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 792 802 793 803 if (test->flags & ETH_TEST_FL_OFFLINE) { 794 - if (be_loopback_test(adapter, BE_MAC_LOOPBACK, 795 - &data[0]) != 0) 804 + if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) 796 805 test->flags |= ETH_TEST_FL_FAILED; 797 806 798 - if (be_loopback_test(adapter, BE_PHY_LOOPBACK, 799 - &data[1]) != 0) 807 + if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0) 800 808 test->flags |= ETH_TEST_FL_FAILED; 801 809 802 810 if (test->flags & ETH_TEST_FL_EXTERNAL_LB) { ··· 820 832 } 821 833 } 822 834 823 - static int 824 - be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) 835 + static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) 825 836 { 826 837 struct be_adapter *adapter = netdev_priv(netdev); 827 838 828 839 return be_load_fw(adapter, efl->data); 829 840 } 830 841 831 - static int 832 - be_get_eeprom_len(struct net_device *netdev) 842 + static int be_get_eeprom_len(struct net_device *netdev) 833 843 { 834 844 struct be_adapter *adapter = netdev_priv(netdev); 835 845 ··· 837 851 if (lancer_chip(adapter)) { 838 852 if (be_physfn(adapter)) 839 853 return lancer_cmd_get_file_len(adapter, 840 - LANCER_VPD_PF_FILE); 854 + LANCER_VPD_PF_FILE); 841 855 else 842 856 
return lancer_cmd_get_file_len(adapter, 843 - LANCER_VPD_VF_FILE); 857 + LANCER_VPD_VF_FILE); 844 858 } else { 845 859 return BE_READ_SEEPROM_LEN; 846 860 } 847 861 } 848 862 849 - static int 850 - be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, 851 - uint8_t *data) 863 + static int be_read_eeprom(struct net_device *netdev, 864 + struct ethtool_eeprom *eeprom, uint8_t *data) 852 865 { 853 866 struct be_adapter *adapter = netdev_priv(netdev); 854 867 struct be_dma_mem eeprom_cmd; ··· 860 875 if (lancer_chip(adapter)) { 861 876 if (be_physfn(adapter)) 862 877 return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, 863 - eeprom->len, data); 878 + eeprom->len, data); 864 879 else 865 880 return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, 866 - eeprom->len, data); 881 + eeprom->len, data); 867 882 } 868 883 869 884 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); ··· 947 962 } 948 963 949 964 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 950 - u32 *rule_locs) 965 + u32 *rule_locs) 951 966 { 952 967 struct be_adapter *adapter = netdev_priv(netdev); 953 968
+110 -114
drivers/net/ethernet/emulex/benet/be_main.c
··· 134 134 } 135 135 136 136 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, 137 - u16 len, u16 entry_size) 137 + u16 len, u16 entry_size) 138 138 { 139 139 struct be_dma_mem *mem = &q->dma_mem; 140 140 ··· 154 154 u32 reg, enabled; 155 155 156 156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, 157 - &reg); 157 + &reg); 158 158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 159 159 160 160 if (!enabled && enable) ··· 165 165 return; 166 166 167 167 pci_write_config_dword(adapter->pdev, 168 - PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); 168 + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); 169 169 } 170 170 171 171 static void be_intr_set(struct be_adapter *adapter, bool enable) ··· 206 206 } 207 207 208 208 static void be_eq_notify(struct be_adapter *adapter, u16 qid, 209 - bool arm, bool clear_int, u16 num_popped) 209 + bool arm, bool clear_int, u16 num_popped) 210 210 { 211 211 u32 val = 0; 212 212 val |= qid & DB_EQ_RING_ID_MASK; 213 - val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << 214 - DB_EQ_RING_ID_EXT_MASK_SHIFT); 213 + val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT); 215 214 216 215 if (adapter->eeh_error) 217 216 return; ··· 476 477 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; 477 478 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; 478 479 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; 479 - if (be_roce_supported(adapter)) { 480 + if (be_roce_supported(adapter)) { 480 481 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; 481 482 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; 482 483 drvs->rx_roce_frames = port_stats->roce_frames_received; ··· 490 491 { 491 492 492 493 struct be_drv_stats *drvs = &adapter->drv_stats; 493 - struct lancer_pport_stats *pport_stats = 494 - pport_stats_from_cmd(adapter); 494 + struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter); 495 495 496 496 
be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats)); 497 497 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; ··· 537 539 } 538 540 539 541 static void populate_erx_stats(struct be_adapter *adapter, 540 - struct be_rx_obj *rxo, 541 - u32 erx_stat) 542 + struct be_rx_obj *rxo, u32 erx_stat) 542 543 { 543 544 if (!BEx_chip(adapter)) 544 545 rx_stats(rxo)->rx_drops_no_frags = erx_stat; ··· 576 579 } 577 580 578 581 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 579 - struct rtnl_link_stats64 *stats) 582 + struct rtnl_link_stats64 *stats) 580 583 { 581 584 struct be_adapter *adapter = netdev_priv(netdev); 582 585 struct be_drv_stats *drvs = &adapter->drv_stats; ··· 657 660 } 658 661 659 662 static void be_tx_stats_update(struct be_tx_obj *txo, 660 - u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) 663 + u32 wrb_cnt, u32 copied, u32 gso_segs, 664 + bool stopped) 661 665 { 662 666 struct be_tx_stats *stats = tx_stats(txo); 663 667 ··· 674 676 675 677 /* Determine number of WRB entries needed to xmit data in an skb */ 676 678 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, 677 - bool *dummy) 679 + bool *dummy) 678 680 { 679 681 int cnt = (skb->len > skb->data_len); 680 682 ··· 702 704 } 703 705 704 706 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, 705 - struct sk_buff *skb) 707 + struct sk_buff *skb) 706 708 { 707 709 u8 vlan_prio; 708 710 u16 vlan_tag; ··· 731 733 } 732 734 733 735 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, 734 - struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) 736 + struct sk_buff *skb, u32 wrb_cnt, u32 len, 737 + bool skip_hw_vlan) 735 738 { 736 739 u16 vlan_tag, proto; 737 740 ··· 773 774 } 774 775 775 776 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, 776 - bool unmap_single) 777 + bool unmap_single) 777 778 { 778 779 dma_addr_t dma; 779 780 ··· 790 791 } 791 792 792 793 static int 
make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 793 - struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, 794 - bool skip_hw_vlan) 794 + struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, 795 + bool skip_hw_vlan) 795 796 { 796 797 dma_addr_t busaddr; 797 798 int i, copied = 0; ··· 820 821 } 821 822 822 823 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 823 - const struct skb_frag_struct *frag = 824 - &skb_shinfo(skb)->frags[i]; 824 + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 825 825 busaddr = skb_frag_dma_map(dev, frag, 0, 826 826 skb_frag_size(frag), DMA_TO_DEVICE); 827 827 if (dma_mapping_error(dev, busaddr)) ··· 925 927 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 926 928 } 927 929 928 - static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, 929 - struct sk_buff *skb) 930 + static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) 930 931 { 931 932 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); 932 933 } ··· 956 959 */ 957 960 if (be_pvid_tagging_enabled(adapter) && 958 961 veh->h_vlan_proto == htons(ETH_P_8021Q)) 959 - *skip_hw_vlan = true; 962 + *skip_hw_vlan = true; 960 963 961 964 /* HW has a bug wherein it will calculate CSUM for VLAN 962 965 * pkts even though it is disabled. 
··· 1074 1077 { 1075 1078 struct be_adapter *adapter = netdev_priv(netdev); 1076 1079 if (new_mtu < BE_MIN_MTU || 1077 - new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - 1078 - (ETH_HLEN + ETH_FCS_LEN))) { 1080 + new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) { 1079 1081 dev_info(&adapter->pdev->dev, 1080 - "MTU must be between %d and %d bytes\n", 1081 - BE_MIN_MTU, 1082 - (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); 1082 + "MTU must be between %d and %d bytes\n", 1083 + BE_MIN_MTU, 1084 + (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); 1083 1085 return -EINVAL; 1084 1086 } 1085 1087 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", 1086 - netdev->mtu, new_mtu); 1088 + netdev->mtu, new_mtu); 1087 1089 netdev->mtu = new_mtu; 1088 1090 return 0; 1089 1091 } ··· 1094 1098 static int be_vid_config(struct be_adapter *adapter) 1095 1099 { 1096 1100 u16 vids[BE_NUM_VLANS_SUPPORTED]; 1097 - u16 num = 0, i; 1101 + u16 num = 0, i = 0; 1098 1102 int status = 0; 1099 1103 1100 1104 /* No need to further configure vids if in promiscuous mode */ ··· 1105 1109 goto set_vlan_promisc; 1106 1110 1107 1111 /* Construct VLAN Table to give to HW */ 1108 - for (i = 0; i < VLAN_N_VID; i++) 1109 - if (adapter->vlan_tag[i]) 1110 - vids[num++] = cpu_to_le16(i); 1112 + for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1113 + vids[num++] = cpu_to_le16(i); 1111 1114 1112 - status = be_cmd_vlan_config(adapter, adapter->if_handle, 1113 - vids, num, 0); 1114 - 1115 + status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1115 1116 if (status) { 1116 1117 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1117 1118 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) ··· 1153 1160 if (lancer_chip(adapter) && vid == 0) 1154 1161 return status; 1155 1162 1156 - if (adapter->vlan_tag[vid]) 1163 + if (test_bit(vid, adapter->vids)) 1157 1164 return status; 1158 1165 1159 - adapter->vlan_tag[vid] = 1; 1166 + set_bit(vid, adapter->vids); 1160 1167 
adapter->vlans_added++; 1161 1168 1162 1169 status = be_vid_config(adapter); 1163 1170 if (status) { 1164 1171 adapter->vlans_added--; 1165 - adapter->vlan_tag[vid] = 0; 1172 + clear_bit(vid, adapter->vids); 1166 1173 } 1167 1174 1168 1175 return status; ··· 1177 1184 if (lancer_chip(adapter) && vid == 0) 1178 1185 goto ret; 1179 1186 1180 - adapter->vlan_tag[vid] = 0; 1187 + clear_bit(vid, adapter->vids); 1181 1188 status = be_vid_config(adapter); 1182 1189 if (!status) 1183 1190 adapter->vlans_added--; 1184 1191 else 1185 - adapter->vlan_tag[vid] = 1; 1192 + set_bit(vid, adapter->vids); 1186 1193 ret: 1187 1194 return status; 1188 1195 } ··· 1247 1254 1248 1255 /* Set to MCAST promisc mode if setting MULTICAST address fails */ 1249 1256 if (status) { 1250 - dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n"); 1251 - dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n"); 1257 + dev_info(&adapter->pdev->dev, 1258 + "Exhausted multicast HW filters.\n"); 1259 + dev_info(&adapter->pdev->dev, 1260 + "Disabling HW multicast filtering.\n"); 1252 1261 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 1253 1262 } 1254 1263 done: ··· 1282 1287 1283 1288 if (status) 1284 1289 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 1285 - mac, vf); 1290 + mac, vf); 1286 1291 else 1287 1292 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 1288 1293 ··· 1290 1295 } 1291 1296 1292 1297 static int be_get_vf_config(struct net_device *netdev, int vf, 1293 - struct ifla_vf_info *vi) 1298 + struct ifla_vf_info *vi) 1294 1299 { 1295 1300 struct be_adapter *adapter = netdev_priv(netdev); 1296 1301 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; ··· 1311 1316 return 0; 1312 1317 } 1313 1318 1314 - static int be_set_vf_vlan(struct net_device *netdev, 1315 - int vf, u16 vlan, u8 qos) 1319 + static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) 1316 1320 { 1317 1321 struct be_adapter *adapter = netdev_priv(netdev); 1318 1322 struct be_vf_cfg 
*vf_cfg = &adapter->vf_cfg[vf]; ··· 1342 1348 return status; 1343 1349 } 1344 1350 1345 - static int be_set_vf_tx_rate(struct net_device *netdev, 1346 - int vf, int rate) 1351 + static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate) 1347 1352 { 1348 1353 struct be_adapter *adapter = netdev_priv(netdev); 1349 1354 int status = 0; ··· 1362 1369 status = be_cmd_config_qos(adapter, rate / 10, vf + 1); 1363 1370 if (status) 1364 1371 dev_err(&adapter->pdev->dev, 1365 - "tx rate %d on VF %d failed\n", rate, vf); 1372 + "tx rate %d on VF %d failed\n", rate, vf); 1366 1373 else 1367 1374 adapter->vf_cfg[vf].tx_rate = rate; 1368 1375 return status; ··· 1462 1469 } 1463 1470 1464 1471 static void be_rx_stats_update(struct be_rx_obj *rxo, 1465 - struct be_rx_compl_info *rxcp) 1472 + struct be_rx_compl_info *rxcp) 1466 1473 { 1467 1474 struct be_rx_stats *stats = rx_stats(rxo); 1468 1475 ··· 1559 1566 skb_frag_set_page(skb, 0, page_info->page); 1560 1567 skb_shinfo(skb)->frags[0].page_offset = 1561 1568 page_info->page_offset + hdr_len; 1562 - skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); 1569 + skb_frag_size_set(&skb_shinfo(skb)->frags[0], 1570 + curr_frag_len - hdr_len); 1563 1571 skb->data_len = curr_frag_len - hdr_len; 1564 1572 skb->truesize += rx_frag_size; 1565 1573 skb->tail += hdr_len; ··· 1719 1725 if (rxcp->vlanf) { 1720 1726 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq, 1721 1727 compl); 1722 - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, 1723 - compl); 1728 + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, 1729 + vlan_tag, compl); 1724 1730 } 1725 1731 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); 1726 1732 rxcp->tunneled = ··· 1751 1757 if (rxcp->vlanf) { 1752 1758 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, 1753 1759 compl); 1754 - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, 1755 - compl); 1760 + 
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, 1761 + vlan_tag, compl); 1756 1762 } 1757 1763 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); 1758 1764 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ··· 1793 1799 rxcp->vlan_tag = swab16(rxcp->vlan_tag); 1794 1800 1795 1801 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && 1796 - !adapter->vlan_tag[rxcp->vlan_tag]) 1802 + !test_bit(rxcp->vlan_tag, adapter->vids)) 1797 1803 rxcp->vlanf = 0; 1798 1804 } 1799 1805 ··· 1909 1915 } 1910 1916 1911 1917 static u16 be_tx_compl_process(struct be_adapter *adapter, 1912 - struct be_tx_obj *txo, u16 last_index) 1918 + struct be_tx_obj *txo, u16 last_index) 1913 1919 { 1914 1920 struct be_queue_info *txq = &txo->q; 1915 1921 struct be_eth_wrb *wrb; ··· 2116 2122 2117 2123 eq = &eqo->q; 2118 2124 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, 2119 - sizeof(struct be_eq_entry)); 2125 + sizeof(struct be_eq_entry)); 2120 2126 if (rc) 2121 2127 return rc; 2122 2128 ··· 2149 2155 2150 2156 cq = &adapter->mcc_obj.cq; 2151 2157 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, 2152 - sizeof(struct be_mcc_compl))) 2158 + sizeof(struct be_mcc_compl))) 2153 2159 goto err; 2154 2160 2155 2161 /* Use the default EQ for MCC completions */ ··· 2269 2275 rxo->adapter = adapter; 2270 2276 cq = &rxo->cq; 2271 2277 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 2272 - sizeof(struct be_eth_rx_compl)); 2278 + sizeof(struct be_eth_rx_compl)); 2273 2279 if (rc) 2274 2280 return rc; 2275 2281 ··· 2333 2339 } 2334 2340 2335 2341 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, 2336 - int budget, int polling) 2342 + int budget, int polling) 2337 2343 { 2338 2344 struct be_adapter *adapter = rxo->adapter; 2339 2345 struct be_queue_info *rx_cq = &rxo->cq; ··· 2359 2365 * promiscuous mode on some skews 2360 2366 */ 2361 2367 if (unlikely(rxcp->port != adapter->port_num && 2362 - !lancer_chip(adapter))) { 2368 + !lancer_chip(adapter))) { 
2363 2369 be_rx_compl_discard(rxo, rxcp); 2364 2370 goto loop_continue; 2365 2371 } ··· 2399 2405 if (!txcp) 2400 2406 break; 2401 2407 num_wrbs += be_tx_compl_process(adapter, txo, 2402 - AMAP_GET_BITS(struct amap_eth_tx_compl, 2403 - wrb_index, txcp)); 2408 + AMAP_GET_BITS(struct 2409 + amap_eth_tx_compl, 2410 + wrb_index, txcp)); 2404 2411 } 2405 2412 2406 2413 if (work_done) { ··· 2411 2416 /* As Tx wrbs have been freed up, wake up netdev queue 2412 2417 * if it was stopped due to lack of tx wrbs. */ 2413 2418 if (__netif_subqueue_stopped(adapter->netdev, idx) && 2414 - atomic_read(&txo->q.used) < txo->q.len / 2) { 2419 + atomic_read(&txo->q.used) < txo->q.len / 2) { 2415 2420 netif_wake_subqueue(adapter->netdev, idx); 2416 2421 } 2417 2422 ··· 2505 2510 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 2506 2511 if (sliport_status & SLIPORT_STATUS_ERR_MASK) { 2507 2512 sliport_err1 = ioread32(adapter->db + 2508 - SLIPORT_ERROR1_OFFSET); 2513 + SLIPORT_ERROR1_OFFSET); 2509 2514 sliport_err2 = ioread32(adapter->db + 2510 - SLIPORT_ERROR2_OFFSET); 2515 + SLIPORT_ERROR2_OFFSET); 2511 2516 adapter->hw_error = true; 2512 2517 /* Do not log error messages if its a FW reset */ 2513 2518 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && ··· 2526 2531 } 2527 2532 } else { 2528 2533 pci_read_config_dword(adapter->pdev, 2529 - PCICFG_UE_STATUS_LOW, &ue_lo); 2534 + PCICFG_UE_STATUS_LOW, &ue_lo); 2530 2535 pci_read_config_dword(adapter->pdev, 2531 - PCICFG_UE_STATUS_HIGH, &ue_hi); 2536 + PCICFG_UE_STATUS_HIGH, &ue_hi); 2532 2537 pci_read_config_dword(adapter->pdev, 2533 - PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2538 + PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2534 2539 pci_read_config_dword(adapter->pdev, 2535 - PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); 2540 + PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); 2536 2541 2537 2542 ue_lo = (ue_lo & ~ue_lo_mask); 2538 2543 ue_hi = (ue_hi & ~ue_hi_mask); ··· 2619 2624 } 2620 2625 2621 2626 static inline int be_msix_vec_get(struct 
be_adapter *adapter, 2622 - struct be_eq_obj *eqo) 2627 + struct be_eq_obj *eqo) 2623 2628 { 2624 2629 return adapter->msix_entries[eqo->msix_idx].vector; 2625 2630 } ··· 2643 2648 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) 2644 2649 free_irq(be_msix_vec_get(adapter, eqo), eqo); 2645 2650 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", 2646 - status); 2651 + status); 2647 2652 be_msix_disable(adapter); 2648 2653 return status; 2649 2654 } ··· 2816 2821 } 2817 2822 2818 2823 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN); 2819 - rc = be_cmd_rss_config(adapter, rss->rsstable, 2820 - rss->rss_flags, 2824 + rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, 2821 2825 128, rss_hkey); 2822 2826 if (rc) { 2823 2827 rss->rss_flags = RSS_ENABLE_NONE; ··· 2897 2903 2898 2904 if (enable) { 2899 2905 status = pci_write_config_dword(adapter->pdev, 2900 - PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); 2906 + PCICFG_PM_CONTROL_OFFSET, 2907 + PCICFG_PM_CONTROL_MASK); 2901 2908 if (status) { 2902 2909 dev_err(&adapter->pdev->dev, 2903 2910 "Could not enable Wake-on-lan\n"); ··· 2907 2912 return status; 2908 2913 } 2909 2914 status = be_cmd_enable_magic_wol(adapter, 2910 - adapter->netdev->dev_addr, &cmd); 2915 + adapter->netdev->dev_addr, 2916 + &cmd); 2911 2917 pci_enable_wake(adapter->pdev, PCI_D3hot, 1); 2912 2918 pci_enable_wake(adapter->pdev, PCI_D3cold, 1); 2913 2919 } else { ··· 2947 2951 2948 2952 if (status) 2949 2953 dev_err(&adapter->pdev->dev, 2950 - "Mac address assignment failed for VF %d\n", vf); 2954 + "Mac address assignment failed for VF %d\n", 2955 + vf); 2951 2956 else 2952 2957 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); 2953 2958 ··· 3090 3093 3091 3094 /* If a FW profile exists, then cap_flags are updated */ 3092 3095 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3093 - BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST); 3094 - status = be_cmd_if_create(adapter, cap_flags, en_flags, 3095 - &vf_cfg->if_handle, vf + 1); 
3096 + BE_IF_FLAGS_BROADCAST | 3097 + BE_IF_FLAGS_MULTICAST); 3098 + status = 3099 + be_cmd_if_create(adapter, cap_flags, en_flags, 3100 + &vf_cfg->if_handle, vf + 1); 3096 3101 if (status) 3097 3102 goto err; 3098 3103 } ··· 3600 3601 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; 3601 3602 3602 3603 static bool be_flash_redboot(struct be_adapter *adapter, 3603 - const u8 *p, u32 img_start, int image_size, 3604 - int hdr_size) 3604 + const u8 *p, u32 img_start, int image_size, 3605 + int hdr_size) 3605 3606 { 3606 3607 u32 crc_offset; 3607 3608 u8 flashed_crc[4]; ··· 3611 3612 3612 3613 p += crc_offset; 3613 3614 3614 - status = be_cmd_get_flash_crc(adapter, flashed_crc, 3615 - (image_size - 4)); 3615 + status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4)); 3616 3616 if (status) { 3617 3617 dev_err(&adapter->pdev->dev, 3618 - "could not get crc from flash, not flashing redboot\n"); 3618 + "could not get crc from flash, not flashing redboot\n"); 3619 3619 return false; 3620 3620 } 3621 3621 ··· 3654 3656 } 3655 3657 3656 3658 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, 3657 - int header_size, 3658 - const struct firmware *fw) 3659 + int header_size, 3660 + const struct firmware *fw) 3659 3661 { 3660 3662 struct flash_section_info *fsec = NULL; 3661 3663 const u8 *p = fw->data; ··· 3671 3673 } 3672 3674 3673 3675 static int be_flash(struct be_adapter *adapter, const u8 *img, 3674 - struct be_dma_mem *flash_cmd, int optype, int img_size) 3676 + struct be_dma_mem *flash_cmd, int optype, int img_size) 3675 3677 { 3676 3678 u32 total_bytes = 0, flash_op, num_bytes = 0; 3677 3679 int status = 0; ··· 3698 3700 memcpy(req->data_buf, img, num_bytes); 3699 3701 img += num_bytes; 3700 3702 status = be_cmd_write_flashrom(adapter, flash_cmd, optype, 3701 - flash_op, num_bytes); 3703 + flash_op, num_bytes); 3702 3704 if (status) { 3703 3705 if (status == ILLEGAL_IOCTL_REQ && 3704 3706 optype == 
OPTYPE_PHY_FW) ··· 3713 3715 3714 3716 /* For BE2, BE3 and BE3-R */ 3715 3717 static int be_flash_BEx(struct be_adapter *adapter, 3716 - const struct firmware *fw, 3717 - struct be_dma_mem *flash_cmd, 3718 - int num_of_images) 3719 - 3718 + const struct firmware *fw, 3719 + struct be_dma_mem *flash_cmd, int num_of_images) 3720 3720 { 3721 3721 int status = 0, i, filehdr_size = 0; 3722 3722 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); ··· 3796 3800 3797 3801 if (pflashcomp[i].optype == OPTYPE_REDBOOT) { 3798 3802 redboot = be_flash_redboot(adapter, fw->data, 3799 - pflashcomp[i].offset, pflashcomp[i].size, 3800 - filehdr_size + img_hdrs_size); 3803 + pflashcomp[i].offset, 3804 + pflashcomp[i].size, 3805 + filehdr_size + 3806 + img_hdrs_size); 3801 3807 if (!redboot) 3802 3808 continue; 3803 3809 } ··· 3810 3812 return -1; 3811 3813 3812 3814 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, 3813 - pflashcomp[i].size); 3815 + pflashcomp[i].size); 3814 3816 if (status) { 3815 3817 dev_err(&adapter->pdev->dev, 3816 3818 "Flashing section type %d failed.\n", ··· 3822 3824 } 3823 3825 3824 3826 static int be_flash_skyhawk(struct be_adapter *adapter, 3825 - const struct firmware *fw, 3826 - struct be_dma_mem *flash_cmd, int num_of_images) 3827 + const struct firmware *fw, 3828 + struct be_dma_mem *flash_cmd, int num_of_images) 3827 3829 { 3828 3830 int status = 0, i, filehdr_size = 0; 3829 3831 int img_offset, img_size, img_optype, redboot; ··· 3871 3873 3872 3874 if (img_optype == OPTYPE_REDBOOT) { 3873 3875 redboot = be_flash_redboot(adapter, fw->data, 3874 - img_offset, img_size, 3875 - filehdr_size + img_hdrs_size); 3876 + img_offset, img_size, 3877 + filehdr_size + 3878 + img_hdrs_size); 3876 3879 if (!redboot) 3877 3880 continue; 3878 3881 } ··· 3895 3896 } 3896 3897 3897 3898 static int lancer_fw_download(struct be_adapter *adapter, 3898 - const struct firmware *fw) 3899 + const struct firmware *fw) 3899 3900 { 3900 3901 #define 
LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) 3901 3902 #define LANCER_FW_DOWNLOAD_LOCATION "/prg" ··· 3961 3962 } 3962 3963 3963 3964 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 3964 - flash_cmd.dma); 3965 + flash_cmd.dma); 3965 3966 if (status) { 3966 3967 dev_err(&adapter->pdev->dev, 3967 3968 "Firmware load error. " ··· 3982 3983 goto lancer_fw_exit; 3983 3984 } 3984 3985 } else if (change_status != LANCER_NO_RESET_NEEDED) { 3985 - dev_err(&adapter->pdev->dev, 3986 - "System reboot required for new FW" 3987 - " to be active\n"); 3986 + dev_err(&adapter->pdev->dev, 3987 + "System reboot required for new FW to be active\n"); 3988 3988 } 3989 3989 3990 3990 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); ··· 4047 4049 switch (ufi_type) { 4048 4050 case UFI_TYPE4: 4049 4051 status = be_flash_skyhawk(adapter, fw, 4050 - &flash_cmd, num_imgs); 4052 + &flash_cmd, num_imgs); 4051 4053 break; 4052 4054 case UFI_TYPE3R: 4053 4055 status = be_flash_BEx(adapter, fw, &flash_cmd, ··· 4117 4119 return status; 4118 4120 } 4119 4121 4120 - static int be_ndo_bridge_setlink(struct net_device *dev, 4121 - struct nlmsghdr *nlh) 4122 + static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) 4122 4123 { 4123 4124 struct be_adapter *adapter = netdev_priv(dev); 4124 4125 struct nlattr *attr, *br_spec; ··· 4159 4162 } 4160 4163 4161 4164 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 4162 - struct net_device *dev, 4163 - u32 filter_mask) 4165 + struct net_device *dev, u32 filter_mask) 4164 4166 { 4165 4167 struct be_adapter *adapter = netdev_priv(dev); 4166 4168 int status = 0; ··· 4873 4877 } 4874 4878 4875 4879 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, 4876 - pci_channel_state_t state) 4880 + pci_channel_state_t state) 4877 4881 { 4878 4882 struct be_adapter *adapter = pci_get_drvdata(pdev); 4879 4883 struct net_device *netdev = adapter->netdev;