Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'cma', 'cxgb3', 'cxgb4', 'ipoib', 'misc', 'mlx4', 'mlx5', 'nes', 'ocrdma' and 'qib' into for-next

+223 -47
+7 -1
drivers/infiniband/core/mad.c
··· 2663 2663 int ret, i; 2664 2664 struct ib_qp_attr *attr; 2665 2665 struct ib_qp *qp; 2666 + u16 pkey_index; 2666 2667 2667 2668 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2668 2669 if (!attr) { 2669 2670 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); 2670 2671 return -ENOMEM; 2671 2672 } 2673 + 2674 + ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2675 + IB_DEFAULT_PKEY_FULL, &pkey_index); 2676 + if (ret) 2677 + pkey_index = 0; 2672 2678 2673 2679 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2674 2680 qp = port_priv->qp_info[i].qp; ··· 2686 2680 * one is needed for the Reset to Init transition 2687 2681 */ 2688 2682 attr->qp_state = IB_QPS_INIT; 2689 - attr->pkey_index = 0; 2683 + attr->pkey_index = pkey_index; 2690 2684 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 2691 2685 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 2692 2686 IB_QP_PKEY_INDEX | IB_QP_QKEY);
+1
drivers/infiniband/hw/cxgb3/iwch_provider.c
··· 226 226 mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * 227 227 sizeof(struct t3_cqe)); 228 228 uresp.memsize = mm->len; 229 + uresp.reserved = 0; 229 230 resplen = sizeof uresp; 230 231 } 231 232 if (ib_copy_to_udata(udata, &uresp, resplen)) {
+2
drivers/infiniband/hw/cxgb4/qp.c
··· 1657 1657 if (mm5) { 1658 1658 uresp.ma_sync_key = ucontext->key; 1659 1659 ucontext->key += PAGE_SIZE; 1660 + } else { 1661 + uresp.ma_sync_key = 0; 1660 1662 } 1661 1663 uresp.sq_key = ucontext->key; 1662 1664 ucontext->key += PAGE_SIZE;
+8 -2
drivers/infiniband/hw/mlx4/mad.c
··· 1511 1511 1512 1512 memset(&attr, 0, sizeof attr); 1513 1513 attr.qp_state = IB_QPS_INIT; 1514 - attr.pkey_index = 1515 - to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; 1514 + ret = 0; 1515 + if (create_tun) 1516 + ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, 1517 + ctx->port, IB_DEFAULT_PKEY_FULL, 1518 + &attr.pkey_index); 1519 + if (ret || !create_tun) 1520 + attr.pkey_index = 1521 + to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; 1516 1522 attr.qkey = IB_QP1_QKEY; 1517 1523 attr.port_num = ctx->port; 1518 1524 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
+7 -4
drivers/infiniband/hw/mlx5/main.c
··· 619 619 620 620 resp.tot_uuars = req.total_num_uuars; 621 621 resp.num_ports = dev->mdev.caps.num_ports; 622 - err = ib_copy_to_udata(udata, &resp, sizeof(resp)); 622 + err = ib_copy_to_udata(udata, &resp, 623 + sizeof(resp) - sizeof(resp.reserved)); 623 624 if (err) 624 625 goto out_uars; 625 626 ··· 1427 1426 if (err) 1428 1427 goto err_eqs; 1429 1428 1430 - if (ib_register_device(&dev->ib_dev, NULL)) 1429 + err = ib_register_device(&dev->ib_dev, NULL); 1430 + if (err) 1431 1431 goto err_rsrc; 1432 1432 1433 1433 err = create_umr_res(dev); ··· 1436 1434 goto err_dev; 1437 1435 1438 1436 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { 1439 - if (device_create_file(&dev->ib_dev.dev, 1440 - mlx5_class_attributes[i])) 1437 + err = device_create_file(&dev->ib_dev.dev, 1438 + mlx5_class_attributes[i]); 1439 + if (err) 1441 1440 goto err_umrc; 1442 1441 } 1443 1442
+1 -1
drivers/infiniband/hw/mlx5/qp.c
··· 199 199 200 200 static int sq_overhead(enum ib_qp_type qp_type) 201 201 { 202 - int size; 202 + int size = 0; 203 203 204 204 switch (qp_type) { 205 205 case IB_QPT_XRC_INI:
+2 -2
drivers/infiniband/hw/nes/nes_hw.c
··· 3570 3570 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; 3571 3571 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; 3572 3572 nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p," 3573 - " Tcp state = %d, iWARP state = %d\n", 3573 + " Tcp state = %s, iWARP state = %s\n", 3574 3574 async_event_id, 3575 3575 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, 3576 - tcp_state, iwarp_state); 3576 + nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); 3577 3577 3578 3578 aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); 3579 3579 if (aeq_info & NES_AEQE_QP) {
+2 -1
drivers/infiniband/hw/nes/nes_verbs.c
··· 1384 1384 1385 1385 if (ibpd->uobject) { 1386 1386 uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index; 1387 + uresp.mmap_rq_db_index = 0; 1387 1388 uresp.actual_sq_size = sq_size; 1388 1389 uresp.actual_rq_size = rq_size; 1389 1390 uresp.qp_id = nesqp->hwqp.qp_id; ··· 1768 1767 resp.cq_id = nescq->hw_cq.cq_number; 1769 1768 resp.cq_size = nescq->hw_cq.cq_size; 1770 1769 resp.mmap_db_index = 0; 1771 - if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 1770 + if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) { 1772 1771 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); 1773 1772 kfree(nescq); 1774 1773 return ERR_PTR(-EFAULT);
-1
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
··· 29 29 #include <net/netevent.h> 30 30 31 31 #include <rdma/ib_addr.h> 32 - #include <rdma/ib_cache.h> 33 32 34 33 #include "ocrdma.h" 35 34 #include "ocrdma_verbs.h"
+4 -1
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 242 242 memset(ctx->ah_tbl.va, 0, map_len); 243 243 ctx->ah_tbl.len = map_len; 244 244 245 + memset(&resp, 0, sizeof(resp)); 245 246 resp.ah_tbl_len = ctx->ah_tbl.len; 246 247 resp.ah_tbl_page = ctx->ah_tbl.pa; 247 248 ··· 254 253 resp.wqe_size = dev->attr.wqe_size; 255 254 resp.rqe_size = dev->attr.rqe_size; 256 255 resp.dpp_wqe_size = dev->attr.wqe_size; 257 - resp.rsvd = 0; 258 256 259 257 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver)); 260 258 status = ib_copy_to_udata(udata, &resp, sizeof(resp)); ··· 338 338 struct ocrdma_alloc_pd_uresp rsp; 339 339 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx); 340 340 341 + memset(&rsp, 0, sizeof(rsp)); 341 342 rsp.id = pd->id; 342 343 rsp.dpp_enabled = pd->dpp_enabled; 343 344 db_page_addr = pd->dev->nic_info.unmapped_db + ··· 693 692 struct ocrdma_ucontext *uctx; 694 693 struct ocrdma_create_cq_uresp uresp; 695 694 695 + memset(&uresp, 0, sizeof(uresp)); 696 696 uresp.cq_id = cq->id; 697 697 uresp.page_size = cq->len; 698 698 uresp.num_pages = 1; ··· 1462 1460 int status; 1463 1461 struct ocrdma_create_srq_uresp uresp; 1464 1462 1463 + memset(&uresp, 0, sizeof(uresp)); 1465 1464 uresp.rq_dbid = srq->rq.dbid; 1466 1465 uresp.num_rq_pages = 1; 1467 1466 uresp.rq_page_addr[0] = srq->rq.pa;
+2
drivers/infiniband/hw/qib/qib_iba7322.c
··· 1596 1596 struct qib_devdata *dd = ppd->dd; 1597 1597 1598 1598 errs &= QIB_E_P_SDMAERRS; 1599 + err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf), 1600 + errs, qib_7322p_error_msgs); 1599 1601 1600 1602 if (errs & QIB_E_P_SDMAUNEXPDATA) 1601 1603 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
+1 -1
drivers/infiniband/hw/qib/qib_sdma.c
··· 717 717 struct qib_sdma_txreq *txp, *txpnext; 718 718 __le64 *descqp; 719 719 u64 desc[2]; 720 - dma_addr_t addr; 720 + u64 addr; 721 721 u16 gen, dwlen, dwoffset; 722 722 u16 head, tail, cnt; 723 723
+63 -13
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 932 932 return 0; 933 933 } 934 934 935 + /* 936 + * Takes whatever value which is in pkey index 0 and updates priv->pkey 937 + * returns 0 if the pkey value was changed. 938 + */ 939 + static inline int update_parent_pkey(struct ipoib_dev_priv *priv) 940 + { 941 + int result; 942 + u16 prev_pkey; 943 + 944 + prev_pkey = priv->pkey; 945 + result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey); 946 + if (result) { 947 + ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n", 948 + priv->port, result); 949 + return result; 950 + } 951 + 952 + priv->pkey |= 0x8000; 953 + 954 + if (prev_pkey != priv->pkey) { 955 + ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n", 956 + prev_pkey, priv->pkey); 957 + /* 958 + * Update the pkey in the broadcast address, while making sure to set 959 + * the full membership bit, so that we join the right broadcast group. 960 + */ 961 + priv->dev->broadcast[8] = priv->pkey >> 8; 962 + priv->dev->broadcast[9] = priv->pkey & 0xff; 963 + return 0; 964 + } 965 + 966 + return 1; 967 + } 968 + 935 969 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, 936 970 enum ipoib_flush_level level) 937 971 { 938 972 struct ipoib_dev_priv *cpriv; 939 973 struct net_device *dev = priv->dev; 940 974 u16 new_index; 975 + int result; 941 976 942 977 mutex_lock(&priv->vlan_mutex); 943 978 ··· 986 951 mutex_unlock(&priv->vlan_mutex); 987 952 988 953 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { 954 + /* for non-child devices must check/update the pkey value here */ 955 + if (level == IPOIB_FLUSH_HEAVY && 956 + !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) 957 + update_parent_pkey(priv); 989 958 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); 990 959 return; 991 960 } ··· 1000 961 } 1001 962 1002 963 if (level == IPOIB_FLUSH_HEAVY) { 1003 - if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { 1004 - clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); 1005 - ipoib_ib_dev_down(dev, 0); 1006 - ipoib_ib_dev_stop(dev, 0); 1007 - if (ipoib_pkey_dev_delay_open(dev)) 964 + /* child devices chase their origin pkey value, while non-child 965 + * (parent) devices should always takes what present in pkey index 0 966 + */ 967 + if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 968 + if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { 969 + clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); 970 + ipoib_ib_dev_down(dev, 0); 971 + ipoib_ib_dev_stop(dev, 0); 972 + if (ipoib_pkey_dev_delay_open(dev)) 973 + return; 974 + } 975 + /* restart QP only if P_Key index is changed */ 976 + if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && 977 + new_index == priv->pkey_index) { 978 + ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); 1008 979 return; 980 + } 981 + priv->pkey_index = new_index; 982 + } else { 983 + result = update_parent_pkey(priv); 984 + /* restart QP only if P_Key value changed */ 985 + if (result) { 986 + ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n"); 987 + return; 988 + } 1009 989 } 1010 - 1011 - /* restart QP only if P_Key index is changed */ 1012 - if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && 1013 - new_index == priv->pkey_index) { 1014 - ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); 1015 - return; 1016 - } 1017 - priv->pkey_index = new_index; 1018 990 } 1019 991 1020 992 if (level == IPOIB_FLUSH_LIGHT) {
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1461 1461 if (sscanf(buf, "%i", &pkey) != 1) 1462 1462 return -EINVAL; 1463 1463 1464 - if (pkey < 0 || pkey > 0xffff) 1464 + if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000) 1465 1465 return -EINVAL; 1466 1466 1467 1467 /*
+9
drivers/infiniband/ulp/ipoib/ipoib_netlink.c
··· 119 119 } else 120 120 child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]); 121 121 122 + if (child_pkey == 0 || child_pkey == 0x8000) 123 + return -EINVAL; 124 + 125 + /* 126 + * Set the full membership bit, so that we join the right 127 + * broadcast group, etc. 128 + */ 129 + child_pkey |= 0x8000; 130 + 122 131 err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD); 123 132 124 133 if (!err && data)
+14 -5
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 46 46 #include "mlx5_core.h" 47 47 48 48 enum { 49 - CMD_IF_REV = 3, 49 + CMD_IF_REV = 4, 50 50 }; 51 51 52 52 enum { ··· 281 281 282 282 case MLX5_CMD_OP_TEARDOWN_HCA: 283 283 return "TEARDOWN_HCA"; 284 + 285 + case MLX5_CMD_OP_ENABLE_HCA: 286 + return "MLX5_CMD_OP_ENABLE_HCA"; 287 + 288 + case MLX5_CMD_OP_DISABLE_HCA: 289 + return "MLX5_CMD_OP_DISABLE_HCA"; 284 290 285 291 case MLX5_CMD_OP_QUERY_PAGES: 286 292 return "QUERY_PAGES"; ··· 1119 1113 1120 1114 for (i = 0; i < (1 << cmd->log_sz); i++) { 1121 1115 if (test_bit(i, &vector)) { 1116 + struct semaphore *sem; 1117 + 1122 1118 ent = cmd->ent_arr[i]; 1119 + if (ent->page_queue) 1120 + sem = &cmd->pages_sem; 1121 + else 1122 + sem = &cmd->sem; 1123 1123 ktime_get_ts(&ent->ts2); 1124 1124 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); 1125 1125 dump_command(dev, ent, 0); ··· 1148 1136 } else { 1149 1137 complete(&ent->done); 1150 1138 } 1151 - if (ent->page_queue) 1152 - up(&cmd->pages_sem); 1153 - else 1154 - up(&cmd->sem); 1139 + up(sem); 1155 1140 } 1156 1141 } 1157 1142 }
+62 -7
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 249 249 return err; 250 250 } 251 251 252 + static int mlx5_core_enable_hca(struct mlx5_core_dev *dev) 253 + { 254 + int err; 255 + struct mlx5_enable_hca_mbox_in in; 256 + struct mlx5_enable_hca_mbox_out out; 257 + 258 + memset(&in, 0, sizeof(in)); 259 + memset(&out, 0, sizeof(out)); 260 + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA); 261 + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 262 + if (err) 263 + return err; 264 + 265 + if (out.hdr.status) 266 + return mlx5_cmd_status_to_err(&out.hdr); 267 + 268 + return 0; 269 + } 270 + 271 + static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) 272 + { 273 + int err; 274 + struct mlx5_disable_hca_mbox_in in; 275 + struct mlx5_disable_hca_mbox_out out; 276 + 277 + memset(&in, 0, sizeof(in)); 278 + memset(&out, 0, sizeof(out)); 279 + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA); 280 + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 281 + if (err) 282 + return err; 283 + 284 + if (out.hdr.status) 285 + return mlx5_cmd_status_to_err(&out.hdr); 286 + 287 + return 0; 288 + } 289 + 252 290 int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) 253 291 { 254 292 struct mlx5_priv *priv = &dev->priv; ··· 342 304 } 343 305 344 306 mlx5_pagealloc_init(dev); 307 + 308 + err = mlx5_core_enable_hca(dev); 309 + if (err) { 310 + dev_err(&pdev->dev, "enable hca failed\n"); 311 + goto err_pagealloc_cleanup; 312 + } 313 + 314 + err = mlx5_satisfy_startup_pages(dev, 1); 315 + if (err) { 316 + dev_err(&pdev->dev, "failed to allocate boot pages\n"); 317 + goto err_disable_hca; 318 + } 319 + 345 320 err = set_hca_ctrl(dev); 346 321 if (err) { 347 322 dev_err(&pdev->dev, "set_hca_ctrl failed\n"); 348 - goto err_pagealloc_cleanup; 323 + goto reclaim_boot_pages; 349 324 } 350 325 351 326 err = handle_hca_cap(dev); 352 327 if (err) { 353 328 dev_err(&pdev->dev, "handle_hca_cap failed\n"); 354 - goto err_pagealloc_cleanup; 329 + goto reclaim_boot_pages; 355 330 } 356 331 357 - err = mlx5_satisfy_startup_pages(dev); 332 + err = mlx5_satisfy_startup_pages(dev, 0); 358 333 if (err) { 359 - dev_err(&pdev->dev, "failed to allocate startup pages\n"); 360 - goto err_pagealloc_cleanup; 334 + dev_err(&pdev->dev, "failed to allocate init pages\n"); 335 + goto reclaim_boot_pages; 361 336 } 362 337 363 338 err = mlx5_pagealloc_start(dev); 364 339 if (err) { 365 340 dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); 366 - goto err_reclaim_pages; 341 + goto reclaim_boot_pages; 367 342 } 368 343 369 344 err = mlx5_cmd_init_hca(dev); ··· 447 396 err_pagealloc_stop: 448 397 mlx5_pagealloc_stop(dev); 449 398 450 - err_reclaim_pages: 399 + reclaim_boot_pages: 451 400 mlx5_reclaim_startup_pages(dev); 401 + 402 + err_disable_hca: 403 + mlx5_core_disable_hca(dev); 452 404 453 405 err_pagealloc_cleanup: 454 406 mlx5_pagealloc_cleanup(dev); ··· 488 434 mlx5_cmd_teardown_hca(dev); 489 435 mlx5_pagealloc_stop(dev); 490 436 mlx5_reclaim_startup_pages(dev); 437 + mlx5_core_disable_hca(dev); 491 438 mlx5_pagealloc_cleanup(dev); 492 439 mlx5_cmd_cleanup(dev); 493 440 iounmap(dev->iseg);
+14 -6
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
··· 64 64 65 65 struct mlx5_query_pages_outbox { 66 66 struct mlx5_outbox_hdr hdr; 67 - u8 reserved[2]; 67 + __be16 num_boot_pages; 68 68 __be16 func_id; 69 69 __be16 init_pages; 70 70 __be16 num_pages; ··· 146 146 } 147 147 148 148 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, 149 - s16 *pages, s16 *init_pages) 149 + s16 *pages, s16 *init_pages, u16 *boot_pages) 150 150 { 151 151 struct mlx5_query_pages_inbox in; 152 152 struct mlx5_query_pages_outbox out; ··· 164 164 165 165 if (pages) 166 166 *pages = be16_to_cpu(out.num_pages); 167 + 167 168 if (init_pages) 168 169 *init_pages = be16_to_cpu(out.init_pages); 170 + 171 + if (boot_pages) 172 + *boot_pages = be16_to_cpu(out.num_boot_pages); 173 + 169 174 *func_id = be16_to_cpu(out.func_id); 170 175 171 176 return err; ··· 362 357 queue_work(dev->priv.pg_wq, &req->work); 363 358 } 364 359 365 - int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev) 360 + int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) 366 361 { 362 + u16 uninitialized_var(boot_pages); 367 363 s16 uninitialized_var(init_pages); 368 364 u16 uninitialized_var(func_id); 369 365 int err; 370 366 371 - err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages); 367 + err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, 368 + &boot_pages); 372 369 if (err) 373 370 return err; 374 371 375 - mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id); 376 372 377 - return give_pages(dev, func_id, init_pages, 0); 373 + mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", 374 + init_pages, boot_pages, func_id); 375 + return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0); 378 376 } 379 377 380 378 static int optimal_reclaimed_pages(void)
+20
include/linux/mlx5/device.h
··· 690 690 __be64 pas[0]; 691 691 }; 692 692 693 + struct mlx5_enable_hca_mbox_in { 694 + struct mlx5_inbox_hdr hdr; 695 + u8 rsvd[8]; 696 + }; 697 + 698 + struct mlx5_enable_hca_mbox_out { 699 + struct mlx5_outbox_hdr hdr; 700 + u8 rsvd[8]; 701 + }; 702 + 703 + struct mlx5_disable_hca_mbox_in { 704 + struct mlx5_inbox_hdr hdr; 705 + u8 rsvd[8]; 706 + }; 707 + 708 + struct mlx5_disable_hca_mbox_out { 709 + struct mlx5_outbox_hdr hdr; 710 + u8 rsvd[8]; 711 + }; 712 + 693 713 struct mlx5_eq_context { 694 714 u8 status; 695 715 u8 ec_oi;
+3 -1
include/linux/mlx5/driver.h
··· 101 101 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 102 102 MLX5_CMD_OP_INIT_HCA = 0x102, 103 103 MLX5_CMD_OP_TEARDOWN_HCA = 0x103, 104 + MLX5_CMD_OP_ENABLE_HCA = 0x104, 105 + MLX5_CMD_OP_DISABLE_HCA = 0x105, 104 106 MLX5_CMD_OP_QUERY_PAGES = 0x107, 105 107 MLX5_CMD_OP_MANAGE_PAGES = 0x108, 106 108 MLX5_CMD_OP_SET_HCA_CAP = 0x109, ··· 692 690 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); 693 691 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, 694 692 s16 npages); 695 - int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev); 693 + int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); 696 694 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); 697 695 void mlx5_register_debugfs(void); 698 696 void mlx5_unregister_debugfs(void);