/* drivers/net/mlx4/fw.c — as of Linux v2.6.26 (842 lines, 30 kB) */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cmd.h>

#include "fw.h"
#include "icm.h"

/*
 * Range of firmware command-interface revisions this driver supports,
 * and the first revision that replaced the per-port fields of
 * QUERY_DEV_CAP with the separate QUERY_PORT command.
 */
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};

/*
 * Deliberately never defined: referencing one of these turns a
 * MLX4_GET/MLX4_PUT invocation with an unsupported operand size into a
 * link-time error instead of silent miscompilation.
 */
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

/*
 * Read a big-endian field of sizeof(dest) bytes at byte offset
 * 'offset' within mailbox buffer 'source' into 'dest', converting to
 * host endianness.
 */
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		switch (sizeof (dest)) {			      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: (dest) = be64_to_cpup(__p); break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)

/*
 * Store 'source' big-endian into mailbox buffer 'dest' at byte offset
 * 'offset'; the field width is sizeof(source).
 */
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)

/* Log a human-readable name for each DEV_CAP flag bit set in 'flags'. */
static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
{
	/* Sparse table: unnamed bits stay NULL and are skipped below. */
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 4] = "reliable multicast",
		[ 5] = "FCoIB support",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[10] = "VMM",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[24] = "Demand paging support",
		[25] = "Router support"
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1 << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}

int
mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 105{ 106 struct mlx4_cmd_mailbox *mailbox; 107 u32 *outbox; 108 u8 field; 109 u16 size; 110 u16 stat_rate; 111 int err; 112 int i; 113 114#define QUERY_DEV_CAP_OUT_SIZE 0x100 115#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 116#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 117#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 118#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 119#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 120#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 121#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 122#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 123#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 124#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a 125#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b 126#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d 127#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e 128#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f 129#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 130#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 131#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 132#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 133#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 134#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 135#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b 136#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d 137#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f 138#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 139#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 140#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 141#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 142#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 143#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 144#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 145#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 146#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 147#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 148#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 149#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b 150#define QUERY_DEV_CAP_BF_OFFSET 0x4c 151#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d 152#define 
QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e 153#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f 154#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 155#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 156#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 157#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 158#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 159#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 160#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 161#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 162#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 163#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 164#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 165#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 166#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 167#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 168#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a 169#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c 170#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e 171#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 172#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 173#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97 174#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 175#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 176 177 mailbox = mlx4_alloc_cmd_mailbox(dev); 178 if (IS_ERR(mailbox)) 179 return PTR_ERR(mailbox); 180 outbox = mailbox->buf; 181 182 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 183 MLX4_CMD_TIME_CLASS_A); 184 if (err) 185 goto out; 186 187 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); 188 dev_cap->reserved_qps = 1 << (field & 0xf); 189 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); 190 dev_cap->max_qps = 1 << (field & 0x1f); 191 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); 192 dev_cap->reserved_srqs = 1 << (field >> 4); 193 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); 194 dev_cap->max_srqs = 1 << (field & 0x1f); 195 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); 196 dev_cap->max_cq_sz = 1 << field; 197 MLX4_GET(field, outbox, 
QUERY_DEV_CAP_RSVD_CQ_OFFSET); 198 dev_cap->reserved_cqs = 1 << (field & 0xf); 199 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); 200 dev_cap->max_cqs = 1 << (field & 0x1f); 201 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); 202 dev_cap->max_mpts = 1 << (field & 0x3f); 203 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); 204 dev_cap->reserved_eqs = 1 << (field & 0xf); 205 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); 206 dev_cap->max_eqs = 1 << (field & 0xf); 207 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); 208 dev_cap->reserved_mtts = 1 << (field >> 4); 209 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); 210 dev_cap->max_mrw_sz = 1 << field; 211 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); 212 dev_cap->reserved_mrws = 1 << (field & 0xf); 213 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); 214 dev_cap->max_mtt_seg = 1 << (field & 0x3f); 215 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); 216 dev_cap->max_requester_per_qp = 1 << (field & 0x3f); 217 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); 218 dev_cap->max_responder_per_qp = 1 << (field & 0x3f); 219 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); 220 field &= 0x1f; 221 if (!field) 222 dev_cap->max_gso_sz = 0; 223 else 224 dev_cap->max_gso_sz = 1 << field; 225 226 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); 227 dev_cap->max_rdma_global = 1 << (field & 0x3f); 228 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); 229 dev_cap->local_ca_ack_delay = field & 0x1f; 230 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 231 dev_cap->num_ports = field & 0xf; 232 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); 233 dev_cap->max_msg_sz = 1 << (field & 0x1f); 234 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 235 dev_cap->stat_rate_support = stat_rate; 236 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 237 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 
238 dev_cap->reserved_uars = field >> 4; 239 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); 240 dev_cap->uar_size = 1 << ((field & 0x3f) + 20); 241 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); 242 dev_cap->min_page_sz = 1 << field; 243 244 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); 245 if (field & 0x80) { 246 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 247 dev_cap->bf_reg_size = 1 << (field & 0x1f); 248 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 249 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 250 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 251 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 252 } else { 253 dev_cap->bf_reg_size = 0; 254 mlx4_dbg(dev, "BlueFlame not available\n"); 255 } 256 257 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); 258 dev_cap->max_sq_sg = field; 259 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); 260 dev_cap->max_sq_desc_sz = size; 261 262 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); 263 dev_cap->max_qp_per_mcg = 1 << field; 264 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); 265 dev_cap->reserved_mgms = field & 0xf; 266 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); 267 dev_cap->max_mcgs = 1 << field; 268 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); 269 dev_cap->reserved_pds = field >> 4; 270 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); 271 dev_cap->max_pds = 1 << (field & 0x3f); 272 273 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); 274 dev_cap->rdmarc_entry_sz = size; 275 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); 276 dev_cap->qpc_entry_sz = size; 277 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); 278 dev_cap->aux_entry_sz = size; 279 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); 280 dev_cap->altc_entry_sz = size; 281 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); 282 dev_cap->eqc_entry_sz = size; 283 
MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); 284 dev_cap->cqc_entry_sz = size; 285 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); 286 dev_cap->srq_entry_sz = size; 287 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); 288 dev_cap->cmpt_entry_sz = size; 289 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); 290 dev_cap->mtt_entry_sz = size; 291 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); 292 dev_cap->dmpt_entry_sz = size; 293 294 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); 295 dev_cap->max_srq_sz = 1 << field; 296 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); 297 dev_cap->max_qp_sz = 1 << field; 298 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); 299 dev_cap->resize_srq = field & 1; 300 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); 301 dev_cap->max_rq_sg = field; 302 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); 303 dev_cap->max_rq_desc_sz = size; 304 305 MLX4_GET(dev_cap->bmme_flags, outbox, 306 QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 307 MLX4_GET(dev_cap->reserved_lkey, outbox, 308 QUERY_DEV_CAP_RSVD_LKEY_OFFSET); 309 MLX4_GET(dev_cap->max_icm_sz, outbox, 310 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); 311 312 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 313 for (i = 1; i <= dev_cap->num_ports; ++i) { 314 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 315 dev_cap->max_vl[i] = field >> 4; 316 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 317 dev_cap->max_mtu[i] = field >> 4; 318 dev_cap->max_port_width[i] = field & 0xf; 319 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 320 dev_cap->max_gids[i] = 1 << (field & 0xf); 321 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); 322 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 323 } 324 } else { 325#define QUERY_PORT_MTU_OFFSET 0x01 326#define QUERY_PORT_WIDTH_OFFSET 0x06 327#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 328#define QUERY_PORT_MAX_VL_OFFSET 0x0b 329 330 for (i = 1; i <= 
dev_cap->num_ports; ++i) { 331 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 332 MLX4_CMD_TIME_CLASS_B); 333 if (err) 334 goto out; 335 336 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 337 dev_cap->max_mtu[i] = field & 0xf; 338 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 339 dev_cap->max_port_width[i] = field & 0xf; 340 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 341 dev_cap->max_gids[i] = 1 << (field >> 4); 342 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 343 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); 344 dev_cap->max_vl[i] = field & 0xf; 345 } 346 } 347 348 if (dev_cap->bmme_flags & 1) 349 mlx4_dbg(dev, "Base MM extensions: yes " 350 "(flags %d, rsvd L_Key %08x)\n", 351 dev_cap->bmme_flags, dev_cap->reserved_lkey); 352 else 353 mlx4_dbg(dev, "Base MM extensions: no\n"); 354 355 /* 356 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then 357 * we can't use any EQs whose doorbell falls on that page, 358 * even if the EQ itself isn't reserved. 
359 */ 360 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, 361 dev_cap->reserved_eqs); 362 363 mlx4_dbg(dev, "Max ICM size %lld MB\n", 364 (unsigned long long) dev_cap->max_icm_sz >> 20); 365 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", 366 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); 367 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", 368 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); 369 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 370 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); 371 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", 372 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); 373 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", 374 dev_cap->reserved_mrws, dev_cap->reserved_mtts); 375 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", 376 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); 377 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", 378 dev_cap->max_pds, dev_cap->reserved_mgms); 379 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", 380 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); 381 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", 382 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1], 383 dev_cap->max_port_width[1]); 384 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 385 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 386 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", 387 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); 388 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); 389 390 dump_dev_cap_flags(dev, dev_cap->flags); 391 392out: 393 mlx4_free_cmd_mailbox(dev, mailbox); 394 return err; 395} 396 397int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) 398{ 399 struct mlx4_cmd_mailbox *mailbox; 400 struct mlx4_icm_iter iter; 401 __be64 
*pages;
	int lg;
	int nent = 0;		/* entries queued in the current mailbox */
	int i;
	int err = 0;
	int ts = 0, tc = 0;	/* total KB mapped / chunk count, for logging */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			/* Even slot: virtual address (if the op takes one). */
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			/*
			 * Odd slot: physical address, with the log2 page
			 * size (relative to the ICM page size) packed
			 * into the low bits freed by the alignment.
			 */
			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);	/* accumulate KB */
			++tc;

			/* Flush the mailbox whenever it fills up. */
			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	/* Post any entries left over from the last partial mailbox. */
	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/* Map the firmware area described by 'icm' (MAP_FA takes no virt address). */
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return
mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

/* Unmap the entire firmware area in one command. */
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
}


/* Start the firmware after its area has been mapped. */
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
}

/*
 * Query firmware version, command-interface revision, catastrophic
 * error buffer location, firmware area size and clear-interrupt
 * register location; fills in mlx4_priv(dev)->fw and ->cmd.
 * Returns 0 on success, -ENODEV for an unsupported command interface,
 * or another negative errno.
 */
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

/* Byte offsets in the QUERY_FW output mailbox. */
#define QUERY_FW_OUT_SIZE		0x100
#define QUERY_FW_VER_OFFSET		0x00
#define QUERY_FW_CMD_IF_REV_OFFSET	0x0a
#define QUERY_FW_MAX_CMD_OFFSET		0x0f
#define QUERY_FW_ERR_START_OFFSET	0x30
#define QUERY_FW_ERR_SIZE_OFFSET	0x38
#define QUERY_FW_ERR_BAR_OFFSET		0x3c

#define QUERY_FW_SIZE_OFFSET		0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET	0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET	0x28

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	/* Refuse to drive firmware with an unknown command interface. */
	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported "
			 "command interface revision %d.\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	/* Pre-QUERY_PORT firmware takes the old-style port commands. */
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	/* BAR field is encoded in the top bits; decode to a BAR number. */
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * Extract the board ID string from the VSD (vendor-specific data)
 * section of the QUERY_ADAPTER output into 'board_id'
 * (MLX4_BOARD_ID_LEN bytes, zero-filled).
 */
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		/* Topspin-format VSD: the board ID is a plain string. */
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}

/*
 * Run QUERY_ADAPTER and fill in the INTA pin and board ID string of
 * *adapter.  Returns 0 on success or a negative errno.
 */
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE		0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET	0x10
#define QUERY_ADAPTER_VSD_OFFSET	0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	/* outbox is u32 *, so divide the byte offset by 4. */
	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * Build the INIT_HCA input mailbox from *param (ICM base addresses and
 * log2 table sizes for each context type) and execute the command.
 * Returns 0 on success or a negative errno.
 */
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

/* Byte offsets of the fields in the INIT_HCA input mailbox. */
#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define INIT_HCA_VERSION		 2
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_QPC_OFFSET		 0x020
#define INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define INIT_HCA_MC_BASE_OFFSET		 (INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x12)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	/* Flag bit 1 tells the HCA the host's endianness. */
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* multicast attributes */

	MLX4_PUT(inbox, param->mc_base,		INIT_HCA_MC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_mc_hash_sz,  INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	/* UAR page size is encoded as log2(size) - 12. */
	MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * Bring 'port' up.  Old firmware takes a mailbox describing the port
 * limits; new firmware needs only the port number as input modifier.
 */
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
/* Layout of the old-style INIT_PORT input mailbox. */
#define INIT_PORT_IN_SIZE		256
#define INIT_PORT_FLAGS_OFFSET		0x00
#define INIT_PORT_FLAG_SIG		(1 << 18)
#define INIT_PORT_FLAG_NG		(1 << 17)
#define INIT_PORT_FLAG_G0		(1 << 16)
#define INIT_PORT_VL_SHIFT		4
#define INIT_PORT_PORT_WIDTH_SHIFT	8
#define INIT_PORT_MTU_OFFSET		0x04
#define INIT_PORT_MAX_GID_OFFSET	0x06
#define INIT_PORT_MAX_PKEY_OFFSET	0x0a
#define INIT_PORT_GUID0_OFFSET		0x10
#define INIT_PORT_NODE_GUID_OFFSET	0x18
#define INIT_PORT_SI_GUID_OFFSET	0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		memset(inbox, 0, INIT_PORT_IN_SIZE);

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);

		/* MTU is carried as bytes (128 << log2 cap). */
		field = 128 << dev->caps.mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);

/* Bring 'port' down. */
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

/* Shut the HCA down; 'panic' is passed as the op modifier. */
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
}

/*
 * Tell the firmware the total ICM size and get back the number of
 * auxiliary ICM pages it needs, converted to system-page units.
 */
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}

int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
}