Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'mlx5_memic_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Maor Gottlieb says:
====================
This series from Maor extends MEMIC to support atomic operations from the
host, in addition to the already supported regular read/write.
====================
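
For orientation, here is roughly what the series enables from userspace, sketched against rdma-core's mlx5dv API. This is a hedged illustration: mlx5dv_dm_map_op_addr() is assumed to be the userspace counterpart added alongside this kernel work (verify the name and semantics against your rdma-core headers), and the operation value 0x1 is a placeholder rather than a documented constant.

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static int memic_atomic_example(struct ibv_context *ctx)
{
        struct ibv_alloc_dm_attr dm_attr = {
                .length = 64,       /* device memory (MEMIC) buffer size */
                .log_align_req = 6, /* request 64-byte alignment */
        };
        struct ibv_dm *dm;
        uint64_t val = 0;
        volatile uint64_t *atomic_va;

        dm = ibv_alloc_dm(ctx, &dm_attr);
        if (!dm)
                return -1;

        /* Plain host read/write access to MEMIC predates this series. */
        ibv_memcpy_to_dm(dm, 0, &val, sizeof(val));

        /* New: map an operation-specific alias of the same memory. A
         * regular host store through this address asks the device to
         * apply the chosen atomic operation to the DM location; the
         * supported op values are advertised by the memic_operations
         * capability added below (0x1 is only a placeholder). */
        atomic_va = (volatile uint64_t *)mlx5dv_dm_map_op_addr(dm, 0x1);
        if (atomic_va)
                *atomic_va = 1;

        ibv_free_dm(dm);
        return 0;
}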

* 'mlx5_memic_ops':
RDMA/mlx5: Expose UAPI to query DM
RDMA/mlx5: Add support in MEMIC operations
RDMA/mlx5: Add support to MODIFY_MEMIC command
RDMA/mlx5: Re-organize the DM code
RDMA/mlx5: Move all DM logic to separate file
RDMA/uverbs: Make UVERBS_OBJECT_METHODS to consider line number
net/mlx5: Add MEMIC operations related bits

+720 -369
+1
drivers/infiniband/hw/mlx5/Makefile
···
         cong.o \
         counters.o \
         cq.o \
+        dm.o \
         doorbell.o \
         gsi.o \
         ib_virt.o \
-101
drivers/infiniband/hw/mlx5/cmd.c
···
         return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
 }

-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
-                         u64 length, u32 alignment)
-{
-        struct mlx5_core_dev *dev = dm->dev;
-        u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
-                                        >> PAGE_SHIFT;
-        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
-        u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
-        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
-        u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
-        u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
-        u32 mlx5_alignment;
-        u64 page_idx = 0;
-        int ret = 0;
-
-        if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
-                return -EINVAL;
-
-        /* mlx5 device sets alignment as 64*2^driver_value
-         * so normalizing is needed.
-         */
-        mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
-                         alignment - MLX5_MEMIC_BASE_ALIGN;
-        if (mlx5_alignment > max_alignment)
-                return -EINVAL;
-
-        MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
-        MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
-        MLX5_SET(alloc_memic_in, in, memic_size, length);
-        MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
-                 mlx5_alignment);
-
-        while (page_idx < num_memic_hw_pages) {
-                spin_lock(&dm->lock);
-                page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
-                                                      num_memic_hw_pages,
-                                                      page_idx,
-                                                      num_pages, 0);
-
-                if (page_idx < num_memic_hw_pages)
-                        bitmap_set(dm->memic_alloc_pages,
-                                   page_idx, num_pages);
-
-                spin_unlock(&dm->lock);
-
-                if (page_idx >= num_memic_hw_pages)
-                        break;
-
-                MLX5_SET64(alloc_memic_in, in, range_start_addr,
-                           hw_start_addr + (page_idx * PAGE_SIZE));
-
-                ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
-                if (ret) {
-                        spin_lock(&dm->lock);
-                        bitmap_clear(dm->memic_alloc_pages,
-                                     page_idx, num_pages);
-                        spin_unlock(&dm->lock);
-
-                        if (ret == -EAGAIN) {
-                                page_idx++;
-                                continue;
-                        }
-
-                        return ret;
-                }
-
-                *addr = dev->bar_addr +
-                        MLX5_GET64(alloc_memic_out, out, memic_start_addr);
-
-                return 0;
-        }
-
-        return -ENOMEM;
-}
-
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
-{
-        struct mlx5_core_dev *dev = dm->dev;
-        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
-        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
-        u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
-        u64 start_page_idx;
-        int err;
-
-        addr -= dev->bar_addr;
-        start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
-
-        MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
-        MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
-        MLX5_SET(dealloc_memic_in, in, memic_size, length);
-
-        err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
-        if (err)
-                return;
-
-        spin_lock(&dm->lock);
-        bitmap_clear(dm->memic_alloc_pages,
-                     start_page_idx, num_pages);
-        spin_unlock(&dm->lock);
-}
-
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
 {
         u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
-3
drivers/infiniband/hw/mlx5/cmd.h
···
 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
                                void *out);
-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
-                         u64 length, u32 alignment);
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
 int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
+584
drivers/infiniband/hw/mlx5/dm.c
···
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "dm.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
+                                u64 length, u32 alignment)
+{
+        struct mlx5_core_dev *dev = dm->dev;
+        u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
+                                        >> PAGE_SHIFT;
+        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+        u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
+        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+        u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
+        u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
+        u32 mlx5_alignment;
+        u64 page_idx = 0;
+        int ret = 0;
+
+        if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
+                return -EINVAL;
+
+        /* mlx5 device sets alignment as 64*2^driver_value
+         * so normalizing is needed.
+         */
+        mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
+                         alignment - MLX5_MEMIC_BASE_ALIGN;
+        if (mlx5_alignment > max_alignment)
+                return -EINVAL;
+
+        MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
+        MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
+        MLX5_SET(alloc_memic_in, in, memic_size, length);
+        MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
+                 mlx5_alignment);
+
+        while (page_idx < num_memic_hw_pages) {
+                spin_lock(&dm->lock);
+                page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
+                                                      num_memic_hw_pages,
+                                                      page_idx,
+                                                      num_pages, 0);
+
+                if (page_idx < num_memic_hw_pages)
+                        bitmap_set(dm->memic_alloc_pages,
+                                   page_idx, num_pages);
+
+                spin_unlock(&dm->lock);
+
+                if (page_idx >= num_memic_hw_pages)
+                        break;
+
+                MLX5_SET64(alloc_memic_in, in, range_start_addr,
+                           hw_start_addr + (page_idx * PAGE_SIZE));
+
+                ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
+                if (ret) {
+                        spin_lock(&dm->lock);
+                        bitmap_clear(dm->memic_alloc_pages,
+                                     page_idx, num_pages);
+                        spin_unlock(&dm->lock);
+
+                        if (ret == -EAGAIN) {
+                                page_idx++;
+                                continue;
+                        }
+
+                        return ret;
+                }
+
+                *addr = dev->bar_addr +
+                        MLX5_GET64(alloc_memic_out, out, memic_start_addr);
+
+                return 0;
+        }
+
+        return -ENOMEM;
+}
+
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+                            u64 length)
+{
+        struct mlx5_core_dev *dev = dm->dev;
+        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+        u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
+        u64 start_page_idx;
+        int err;
+
+        addr -= dev->bar_addr;
+        start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
+
+        MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
+        MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
+        MLX5_SET(dealloc_memic_in, in, memic_size, length);
+
+        err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
+        if (err)
+                return;
+
+        spin_lock(&dm->lock);
+        bitmap_clear(dm->memic_alloc_pages,
+                     start_page_idx, num_pages);
+        spin_unlock(&dm->lock);
+}
+
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+                               u8 operation)
+{
+        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+        struct mlx5_core_dev *dev = dm->dev;
+
+        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
+        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+        mlx5_cmd_exec_in(dev, modify_memic, in);
+}
+
+static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+                                   u8 operation, phys_addr_t *op_addr)
+{
+        u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
+        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+        struct mlx5_core_dev *dev = dm->dev;
+        int err;
+
+        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
+        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+        err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
+        if (err)
+                return err;
+
+        *op_addr = dev->bar_addr +
+                   MLX5_GET64(modify_memic_out, out, memic_operation_addr);
+        return 0;
+}
+
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+                             struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
+                             size_t size, u64 address)
+{
+        mentry->mmap_flag = mmap_flag;
+        mentry->address = address;
+
+        return rdma_user_mmap_entry_insert_range(
+                context, &mentry->rdma_entry, size,
+                MLX5_IB_MMAP_DEVICE_MEM << 16,
+                (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static void mlx5_ib_dm_memic_free(struct kref *kref)
+{
+        struct mlx5_ib_dm_memic *dm =
+                container_of(kref, struct mlx5_ib_dm_memic, ref);
+        struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);
+
+        mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
+        kfree(dm);
+}
+
+static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
+                           struct uverbs_attr_bundle *attrs)
+{
+        u64 start_offset;
+        u16 page_idx;
+        int err;
+
+        page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
+        start_offset = op_entry->op_addr & ~PAGE_MASK;
+        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+                             &page_idx, sizeof(page_idx));
+        if (err)
+                return err;
+
+        return uverbs_copy_to(attrs,
+                              MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+                              &start_offset, sizeof(start_offset));
+}
+
+static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
+                           struct uverbs_attr_bundle *attrs)
+{
+        struct mlx5_ib_dm_op_entry *op_entry;
+
+        op_entry = xa_load(&dm->ops, op);
+        if (!op_entry)
+                return -ENOENT;
+
+        return copy_op_to_user(op_entry, attrs);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
+        struct uverbs_attr_bundle *attrs)
+{
+        struct ib_uobject *uobj = uverbs_attr_get_uobject(
+                attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
+        struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+        struct ib_dm *ibdm = uobj->object;
+        struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
+        struct mlx5_ib_dm_op_entry *op_entry;
+        int err;
+        u8 op;
+
+        err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
+        if (err)
+                return err;
+
+        if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
+                return -EOPNOTSUPP;
+
+        mutex_lock(&dm->ops_xa_lock);
+        err = map_existing_op(dm, op, attrs);
+        if (!err || err != -ENOENT)
+                goto err_unlock;
+
+        op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
+        if (!op_entry)
+                goto err_unlock;
+
+        err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
+                                      &op_entry->op_addr);
+        if (err) {
+                kfree(op_entry);
+                goto err_unlock;
+        }
+        op_entry->op = op;
+        op_entry->dm = dm;
+
+        err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
+                                MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
+                                op_entry->op_addr & PAGE_MASK);
+        if (err) {
+                mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
+                kfree(op_entry);
+                goto err_unlock;
+        }
+        /* From this point, entry will be freed by mmap_free */
+        kref_get(&dm->ref);
+
+        err = copy_op_to_user(op_entry, attrs);
+        if (err)
+                goto err_remove;
+
+        err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
+        if (err)
+                goto err_remove;
+        mutex_unlock(&dm->ops_xa_lock);
+
+        return 0;
+
+err_remove:
+        rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
+err_unlock:
+        mutex_unlock(&dm->ops_xa_lock);
+
+        return err;
+}
+
+static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
+                                           struct ib_dm_alloc_attr *attr,
+                                           struct uverbs_attr_bundle *attrs)
+{
+        struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+        struct mlx5_ib_dm_memic *dm;
+        u64 start_offset;
+        u16 page_idx;
+        int err;
+        u64 address;
+
+        if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+                return ERR_PTR(-EOPNOTSUPP);
+
+        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+        if (!dm)
+                return ERR_PTR(-ENOMEM);
+
+        dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
+        dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
+        dm->base.ibdm.device = ctx->device;
+
+        kref_init(&dm->ref);
+        xa_init(&dm->ops);
+        mutex_init(&dm->ops_xa_lock);
+        dm->req_length = attr->length;
+
+        err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
+                                   dm->base.size, attr->alignment);
+        if (err) {
+                kfree(dm);
+                return ERR_PTR(err);
+        }
+
+        address = dm->base.dev_addr & PAGE_MASK;
+        err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
+                                dm->base.size, address);
+        if (err) {
+                mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
+                kfree(dm);
+                return ERR_PTR(err);
+        }
+
+        page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+                             &page_idx, sizeof(page_idx));
+        if (err)
+                goto err_copy;
+
+        start_offset = dm->base.dev_addr & ~PAGE_MASK;
+        err = uverbs_copy_to(attrs,
+                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+                             &start_offset, sizeof(start_offset));
+        if (err)
+                goto err_copy;
+
+        return &dm->base.ibdm;
+
+err_copy:
+        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+        return ERR_PTR(err);
+}
+
+static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+                                            struct ib_dm_alloc_attr *attr,
+                                            struct uverbs_attr_bundle *attrs,
+                                            int type)
+{
+        struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
+        struct mlx5_ib_dm_icm *dm;
+        u64 act_size;
+        int err;
+
+        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+        if (!dm)
+                return ERR_PTR(-ENOMEM);
+
+        dm->base.type = type;
+        dm->base.ibdm.device = ctx->device;
+
+        if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW)) {
+                err = -EPERM;
+                goto free;
+        }
+
+        if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
+              MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
+              MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+              MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))) {
+                err = -EOPNOTSUPP;
+                goto free;
+        }
+
+        /* Allocation size must a multiple of the basic block size
+         * and a power of 2.
+         */
+        act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+        act_size = roundup_pow_of_two(act_size);
+
+        dm->base.size = act_size;
+        err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
+                                   to_mucontext(ctx)->devx_uid,
+                                   &dm->base.dev_addr, &dm->obj_id);
+        if (err)
+                goto free;
+
+        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+                             &dm->base.dev_addr, sizeof(dm->base.dev_addr));
+        if (err) {
+                mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size,
+                                       to_mucontext(ctx)->devx_uid,
+                                       dm->base.dev_addr, dm->obj_id);
+                goto free;
+        }
+        return &dm->base.ibdm;
+free:
+        kfree(dm);
+        return ERR_PTR(err);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+                               struct ib_ucontext *context,
+                               struct ib_dm_alloc_attr *attr,
+                               struct uverbs_attr_bundle *attrs)
+{
+        enum mlx5_ib_uapi_dm_type type;
+        int err;
+
+        err = uverbs_get_const_default(&type, attrs,
+                                       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+                                       MLX5_IB_UAPI_DM_TYPE_MEMIC);
+        if (err)
+                return ERR_PTR(err);
+
+        mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+                    type, attr->length, attr->alignment);
+
+        switch (type) {
+        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+                return handle_alloc_dm_memic(context, attr, attrs);
+        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+                return handle_alloc_dm_sw_icm(context, attr, attrs,
+                                              MLX5_SW_ICM_TYPE_STEERING);
+        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+                return handle_alloc_dm_sw_icm(context, attr, attrs,
+                                              MLX5_SW_ICM_TYPE_HEADER_MODIFY);
+        default:
+                return ERR_PTR(-EOPNOTSUPP);
+        }
+}
+
+static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
+{
+        struct mlx5_ib_dm_op_entry *entry;
+        unsigned long idx;
+
+        mutex_lock(&dm->ops_xa_lock);
+        xa_for_each(&dm->ops, idx, entry) {
+                xa_erase(&dm->ops, idx);
+                rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
+        }
+        mutex_unlock(&dm->ops_xa_lock);
+}
+
+static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
+{
+        dm_memic_remove_ops(dm);
+        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+}
+
+static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
+                               struct mlx5_ib_dm_icm *dm)
+{
+        enum mlx5_sw_icm_type type =
+                dm->base.type == MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM ?
+                        MLX5_SW_ICM_TYPE_STEERING :
+                        MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+        struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
+        int err;
+
+        err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
+                                     dm->base.dev_addr, dm->obj_id);
+        if (!err)
+                kfree(dm);
+        return 0;
+}
+
+static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
+                              struct uverbs_attr_bundle *attrs)
+{
+        struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+                &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+        struct mlx5_ib_dm *dm = to_mdm(ibdm);
+
+        switch (dm->type) {
+        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+                mlx5_dm_memic_dealloc(to_memic(ibdm));
+                return 0;
+        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+                return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
+        default:
+                return -EOPNOTSUPP;
+        }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
+        struct uverbs_attr_bundle *attrs)
+{
+        struct ib_dm *ibdm =
+                uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
+        struct mlx5_ib_dm *dm = to_mdm(ibdm);
+        struct mlx5_ib_dm_memic *memic;
+        u64 start_offset;
+        u16 page_idx;
+        int err;
+
+        if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
+                return -EOPNOTSUPP;
+
+        memic = to_memic(ibdm);
+        page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
+        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+                             &page_idx, sizeof(page_idx));
+        if (err)
+                return err;
+
+        start_offset = memic->base.dev_addr & ~PAGE_MASK;
+        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+                             &start_offset, sizeof(start_offset));
+        if (err)
+                return err;
+
+        return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+                              &memic->req_length,
+                              sizeof(memic->req_length));
+}
+
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+                          struct mlx5_user_mmap_entry *mentry)
+{
+        struct mlx5_ib_dm_op_entry *op_entry;
+        struct mlx5_ib_dm_memic *mdm;
+
+        switch (mentry->mmap_flag) {
+        case MLX5_IB_MMAP_TYPE_MEMIC:
+                mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
+                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+                break;
+        case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+                op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
+                                        mentry);
+                mdm = op_entry->dm;
+                mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
+                                          op_entry->op);
+                kfree(op_entry);
+                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+                break;
+        default:
+                WARN_ON(true);
+        }
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+        MLX5_IB_METHOD_DM_QUERY,
+        UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
+                        UVERBS_ACCESS_READ, UA_MANDATORY),
+        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+                            UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
+        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY));

+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+        mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
+        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+                            UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
+        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+                             enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+        MLX5_IB_METHOD_DM_MAP_OP_ADDR,
+        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
+                        UVERBS_OBJECT_DM,
+                        UVERBS_ACCESS_READ,
+                        UA_MANDATORY),
+        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+                           UVERBS_ATTR_TYPE(u8),
+                           UA_MANDATORY),
+        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+                            UVERBS_ATTR_TYPE(u64),
+                            UA_MANDATORY),
+        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+                            UVERBS_ATTR_TYPE(u16),
+                            UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
+                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
+                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));
+
+const struct uapi_definition mlx5_ib_dm_defs[] = {
+        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+        UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
+        {},
+};
+
+const struct ib_device_ops mlx5_ib_dev_dm_ops = {
+        .alloc_dm = mlx5_ib_alloc_dm,
+        .dealloc_dm = mlx5_ib_dealloc_dm,
+        .reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
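The DM alloc and DM_MAP_OP_ADDR handlers above return a 16-bit page index plus an in-page start offset, and the consumer turns those into a CPU pointer with a plain mmap() on the uverbs command FD. A minimal sketch of that step, assuming the MLX5_IB_MMAP_DEVICE_MEM command value that keys rdma_user_mmap_entry_insert_range() above (the numeric value below is illustrative, not quoted from the uapi headers):

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* page_idx and start_offset are the ..._RESP_PAGE_INDEX and
 * ..._RESP_START_OFFSET values copied to userspace above; cmd_fd is
 * the ibv_context command FD. */
static void *map_dm_alias(int cmd_fd, uint16_t page_idx,
                          uint64_t start_offset, size_t len)
{
        const uint64_t dev_mem_cmd = 8; /* MLX5_IB_MMAP_DEVICE_MEM; illustrative value */
        long psz = sysconf(_SC_PAGESIZE);
        /* The kernel keyed the entry at (MLX5_IB_MMAP_DEVICE_MEM << 16) +
         * page_idx pages, so that, scaled to bytes, is the mmap offset. */
        off_t offset = (off_t)((dev_mem_cmd << 16) + page_idx) * psz;
        void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                          cmd_fd, offset);

        if (base == MAP_FAILED)
                return NULL;
        return (char *)base + start_offset; /* the alias may start mid-page */
}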
+68
drivers/infiniband/hw/mlx5/dm.h
···
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DM_H
+#define _MLX5_IB_DM_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dm_ops;
+extern const struct uapi_definition mlx5_ib_dm_defs[];
+
+struct mlx5_ib_dm {
+        struct ib_dm            ibdm;
+        u32                     type;
+        phys_addr_t             dev_addr;
+        size_t                  size;
+};
+
+struct mlx5_ib_dm_op_entry {
+        struct mlx5_user_mmap_entry     mentry;
+        phys_addr_t                     op_addr;
+        struct mlx5_ib_dm_memic        *dm;
+        u8                              op;
+};
+
+struct mlx5_ib_dm_memic {
+        struct mlx5_ib_dm           base;
+        struct mlx5_user_mmap_entry mentry;
+        struct xarray               ops;
+        struct mutex                ops_xa_lock;
+        struct kref                 ref;
+        size_t                      req_length;
+};
+
+struct mlx5_ib_dm_icm {
+        struct mlx5_ib_dm base;
+        u32               obj_id;
+};
+
+static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
+{
+        return container_of(ibdm, struct mlx5_ib_dm, ibdm);
+}
+
+static inline struct mlx5_ib_dm_memic *to_memic(struct ib_dm *ibdm)
+{
+        return container_of(ibdm, struct mlx5_ib_dm_memic, base.ibdm);
+}
+
+static inline struct mlx5_ib_dm_icm *to_icm(struct ib_dm *ibdm)
+{
+        return container_of(ibdm, struct mlx5_ib_dm_icm, base.ibdm);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+                               struct ib_ucontext *context,
+                               struct ib_dm_alloc_attr *attr,
+                               struct uverbs_attr_bundle *attrs);
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+                          struct mlx5_user_mmap_entry *mentry);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+                            u64 length);
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+                               u8 operation);
+
+#endif /* _MLX5_IB_DM_H */
+4 -239
drivers/infiniband/hw/mlx5/main.c
···
 #include "ib_rep.h"
 #include "cmd.h"
 #include "devx.h"
+#include "dm.h"
 #include "fs.h"
 #include "srq.h"
 #include "qp.h"
···
         struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
         struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
         struct mlx5_var_table *var_table = &dev->var_table;
-        struct mlx5_ib_dm *mdm;

         switch (mentry->mmap_flag) {
         case MLX5_IB_MMAP_TYPE_MEMIC:
-                mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
-                mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
-                                       mdm->size);
-                kfree(mdm);
+        case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+                mlx5_ib_dm_mmap_free(dev, mentry);
                 break;
         case MLX5_IB_MMAP_TYPE_VAR:
                 mutex_lock(&var_table->bitmap_lock);
···
         return err;
 }

-static int add_dm_mmap_entry(struct ib_ucontext *context,
-                             struct mlx5_ib_dm *mdm,
-                             u64 address)
-{
-        mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
-        mdm->mentry.address = address;
-        return rdma_user_mmap_entry_insert_range(
-                        context, &mdm->mentry.rdma_entry,
-                        mdm->size,
-                        MLX5_IB_MMAP_DEVICE_MEM << 16,
-                        (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
-}
-
 static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
 {
         unsigned long idx;
···
         default:
                 return mlx5_ib_mmap_offset(dev, vma, ibcontext);
         }
-
-        return 0;
-}
-
-static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
-                                        u32 type)
-{
-        switch (type) {
-        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-                if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
-                        return -EOPNOTSUPP;
-                break;
-        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-                if (!capable(CAP_SYS_RAWIO) ||
-                    !capable(CAP_NET_RAW))
-                        return -EPERM;
-
-                if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
-                      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner) ||
-                      MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2) ||
-                      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner_v2)))
-                        return -EOPNOTSUPP;
-                break;
-        }
-
-        return 0;
-}
-
-static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
-                                 struct mlx5_ib_dm *dm,
-                                 struct ib_dm_alloc_attr *attr,
-                                 struct uverbs_attr_bundle *attrs)
-{
-        struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
-        u64 start_offset;
-        u16 page_idx;
-        int err;
-        u64 address;
-
-        dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
-
-        err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
-                                   dm->size, attr->alignment);
-        if (err)
-                return err;
-
-        address = dm->dev_addr & PAGE_MASK;
-        err = add_dm_mmap_entry(ctx, dm, address);
-        if (err)
-                goto err_dealloc;
-
-        page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
-        err = uverbs_copy_to(attrs,
-                             MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                             &page_idx,
-                             sizeof(page_idx));
-        if (err)
-                goto err_copy;
-
-        start_offset = dm->dev_addr & ~PAGE_MASK;
-        err = uverbs_copy_to(attrs,
-                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-                             &start_offset, sizeof(start_offset));
-        if (err)
-                goto err_copy;
-
-        return 0;
-
-err_copy:
-        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-err_dealloc:
-        mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-
-        return err;
-}
-
-static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
-                                  struct mlx5_ib_dm *dm,
-                                  struct ib_dm_alloc_attr *attr,
-                                  struct uverbs_attr_bundle *attrs,
-                                  int type)
-{
-        struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
-        u64 act_size;
-        int err;
-
-        /* Allocation size must a multiple of the basic block size
-         * and a power of 2.
-         */
-        act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
-        act_size = roundup_pow_of_two(act_size);
-
-        dm->size = act_size;
-        err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
-                                   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
-                                   &dm->icm_dm.obj_id);
-        if (err)
-                return err;
-
-        err = uverbs_copy_to(attrs,
-                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-                             &dm->dev_addr, sizeof(dm->dev_addr));
-        if (err)
-                mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
-                                       to_mucontext(ctx)->devx_uid, dm->dev_addr,
-                                       dm->icm_dm.obj_id);
-
-        return err;
-}
-
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
-                               struct ib_ucontext *context,
-                               struct ib_dm_alloc_attr *attr,
-                               struct uverbs_attr_bundle *attrs)
-{
-        struct mlx5_ib_dm *dm;
-        enum mlx5_ib_uapi_dm_type type;
-        int err;
-
-        err = uverbs_get_const_default(&type, attrs,
-                                       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
-                                       MLX5_IB_UAPI_DM_TYPE_MEMIC);
-        if (err)
-                return ERR_PTR(err);
-
-        mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
-                    type, attr->length, attr->alignment);
-
-        err = check_dm_type_support(to_mdev(ibdev), type);
-        if (err)
-                return ERR_PTR(err);
-
-        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
-        if (!dm)
-                return ERR_PTR(-ENOMEM);
-
-        dm->type = type;
-
-        switch (type) {
-        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-                err = handle_alloc_dm_memic(context, dm,
-                                            attr,
-                                            attrs);
-                break;
-        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-                err = handle_alloc_dm_sw_icm(context, dm,
-                                             attr, attrs,
-                                             MLX5_SW_ICM_TYPE_STEERING);
-                break;
-        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-                err = handle_alloc_dm_sw_icm(context, dm,
-                                             attr, attrs,
-                                             MLX5_SW_ICM_TYPE_HEADER_MODIFY);
-                break;
-        default:
-                err = -EOPNOTSUPP;
-        }
-
-        if (err)
-                goto err_free;
-
-        return &dm->ibdm;
-
-err_free:
-        kfree(dm);
-        return ERR_PTR(err);
-}
-
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
-{
-        struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
-                &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
-        struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
-        struct mlx5_ib_dm *dm = to_mdm(ibdm);
-        int ret;
-
-        switch (dm->type) {
-        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-                rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-                return 0;
-        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-                ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
-                                             dm->size, ctx->devx_uid, dm->dev_addr,
-                                             dm->icm_dm.obj_id);
-                if (ret)
-                        return ret;
-                break;
-        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-                ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
-                                             dm->size, ctx->devx_uid, dm->dev_addr,
-                                             dm->icm_dm.obj_id);
-                if (ret)
-                        return ret;
-                break;
-        default:
-                return -EOPNOTSUPP;
-        }
-
-        kfree(dm);

         return 0;
 }
···
                              &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));

 ADD_UVERBS_ATTRIBUTES_SIMPLE(
-        mlx5_ib_dm,
-        UVERBS_OBJECT_DM,
-        UVERBS_METHOD_DM_ALLOC,
-        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-                            UVERBS_ATTR_TYPE(u64),
-                            UA_MANDATORY),
-        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                            UVERBS_ATTR_TYPE(u16),
-                            UA_OPTIONAL),
-        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
-                             enum mlx5_ib_uapi_dm_type,
-                             UA_OPTIONAL));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
         mlx5_ib_flow_action,
         UVERBS_OBJECT_FLOW_ACTION,
         UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
···
         UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
         UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
         UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+        UAPI_DEF_CHAIN(mlx5_ib_dm_defs),

         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
                                 &mlx5_ib_flow_action),
-        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
         UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
                                       UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
···
         .dealloc_xrcd = mlx5_ib_dealloc_xrcd,

         INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
-};
-
-static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
-        .alloc_dm = mlx5_ib_alloc_dm,
-        .dealloc_dm = mlx5_ib_dealloc_dm,
-        .reg_dm_mr = mlx5_ib_reg_dm_mr,
 };

 static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
+1 -24
drivers/infiniband/hw/mlx5/mlx5_ib.h
···
         MLX5_IB_MMAP_TYPE_VAR = 2,
         MLX5_IB_MMAP_TYPE_UAR_WC = 3,
         MLX5_IB_MMAP_TYPE_UAR_NC = 4,
+        MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
 };

 struct mlx5_bfreg_info {
···
         u8 mmap_flag;
         u64 address;
         u32 page_idx;
-};
-
-struct mlx5_ib_dm {
-        struct ib_dm            ibdm;
-        phys_addr_t             dev_addr;
-        u32                     type;
-        size_t                  size;
-        union {
-                struct {
-                        u32     obj_id;
-                } icm_dm;
-                /* other dm types specific params should be added here */
-        };
-        struct mlx5_user_mmap_entry mentry;
 };

 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
···
         return container_of(msrq, struct mlx5_ib_srq, msrq);
 }

-static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
-{
-        return container_of(ibdm, struct mlx5_ib_dm, ibdm);
-}
-
 static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
 {
         return container_of(ibmr, struct mlx5_ib_mr, ibmr);
···
                         struct ib_rwq_ind_table_init_attr *init_attr,
                         struct ib_udata *udata);
 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
-                               struct ib_ucontext *context,
-                               struct ib_dm_alloc_attr *attr,
-                               struct uverbs_attr_bundle *attrs);
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
                                 struct ib_dm_mr_attr *attr,
                                 struct uverbs_attr_bundle *attrs);
+1
drivers/infiniband/hw/mlx5/mr.c
···
 #include <rdma/ib_umem.h>
 #include <rdma/ib_umem_odp.h>
 #include <rdma/ib_verbs.h>
+#include "dm.h"
 #include "mlx5_ib.h"

 /*
+41 -1
include/linux/mlx5/mlx5_ifc.h
···
         MLX5_CMD_OP_PAGE_FAULT_RESUME             = 0x204,
         MLX5_CMD_OP_ALLOC_MEMIC                   = 0x205,
         MLX5_CMD_OP_DEALLOC_MEMIC                 = 0x206,
+        MLX5_CMD_OP_MODIFY_MEMIC                  = 0x207,
         MLX5_CMD_OP_CREATE_EQ                     = 0x301,
         MLX5_CMD_OP_DESTROY_EQ                    = 0x302,
         MLX5_CMD_OP_QUERY_EQ                      = 0x303,
···

         u8         header_modify_sw_icm_start_address[0x40];

-        u8         reserved_at_180[0x680];
+        u8         reserved_at_180[0x80];
+
+        u8         memic_operations[0x20];
+
+        u8         reserved_at_220[0x5e0];
 };

 struct mlx5_ifc_device_event_cap_bits {
···
         u8         op_mod[0x10];

         u8         reserved_at_40[0x40];
+};
+
+enum {
+        MLX5_MODIFY_MEMIC_OP_MOD_ALLOC,
+        MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC,
+};
+
+struct mlx5_ifc_modify_memic_in_bits {
+        u8         opcode[0x10];
+        u8         uid[0x10];
+
+        u8         reserved_at_20[0x10];
+        u8         op_mod[0x10];
+
+        u8         reserved_at_40[0x20];
+
+        u8         reserved_at_60[0x18];
+        u8         memic_operation_type[0x8];
+
+        u8         memic_start_addr[0x40];
+
+        u8         reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_modify_memic_out_bits {
+        u8         status[0x8];
+        u8         reserved_at_8[0x18];
+
+        u8         syndrome[0x20];
+
+        u8         reserved_at_40[0x40];
+
+        u8         memic_operation_addr[0x40];
+
+        u8         reserved_at_c0[0x140];
 };

 struct mlx5_ifc_alloc_memic_in_bits {
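A note on the mlx5_ifc convention, for readers new to it: each u8 name[0xN] member above describes an N-bit field of the wire layout, and the MLX5_SET/MLX5_GET/MLX5_ST_SZ_DW macros consume those bit widths. Worked through for modify_memic_in as defined above (macro behavior simplified; the real definitions in linux/mlx5/device.h also handle the big-endian conversion):

/* Total layout: 0x10+0x10 + 0x10+0x10 + 0x20 + 0x18+0x8 + 0x40 + 0x140
 * = 0x200 bits = 64 bytes, so MLX5_ST_SZ_DW(modify_memic_in) is 16 and
 * this declares a zeroed 16-dword command buffer: */
u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};

/* MLX5_SET(modify_memic_in, in, memic_operation_type, op) then writes
 * the 8-bit field that starts at bit offset 0x78 of that buffer. */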
+1 -1
include/rdma/uverbs_named_ioctl.h
···

 /* These are static so they do not need to be qualified */
 #define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id
-#define UVERBS_OBJECT_METHODS(object_id) _object_methods_##object_id
+#define UVERBS_OBJECT_METHODS(object_id) _UVERBS_NAME(_object_methods_##object_id, __LINE__)

 #define DECLARE_UVERBS_NAMED_METHOD(_method_id, ...)                           \
         static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS(       \
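Why this one-liner is needed here: dm.c above expands UVERBS_OBJECT_METHODS(UVERBS_OBJECT_DM) twice in the same translation unit, once via ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_dm, UVERBS_OBJECT_DM, ...) and once via DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM, ...); both define a static array under that name. Folding __LINE__ into the identifier keeps the two definitions distinct. An illustrative expansion (identifier spelling approximate, assuming _UVERBS_NAME token-pastes its arguments):

/* Before the patch, both declarations in dm.c collide on one symbol: */
static const struct uverbs_method_def *const _object_methods_UVERBS_OBJECT_DM[];

/* After it, each expansion is uniqued by the line that produced it: */
static const struct uverbs_method_def *const _object_methods_UVERBS_OBJECT_DM545[];
static const struct uverbs_method_def *const _object_methods_UVERBS_OBJECT_DM570[];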
+19
include/uapi/rdma/mlx5_user_ioctl_cmds.h
···
         MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
 };

+enum mlx5_ib_dm_methods {
+        MLX5_IB_METHOD_DM_MAP_OP_ADDR = (1U << UVERBS_ID_NS_SHIFT),
+        MLX5_IB_METHOD_DM_QUERY,
+};
+
+enum mlx5_ib_dm_map_op_addr_attrs {
+        MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+        MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+        MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+        MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+};
+
+enum mlx5_ib_query_dm_attrs {
+        MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+        MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+        MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+        MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+};
+
 enum mlx5_ib_alloc_dm_attrs {
         MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
         MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,