Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vdpa/mlx5: Avoid overwriting CVQ iotlb

When qemu uses different address spaces for data and control virtqueues,
the current code would overwrite the control virtqueue iotlb through the
dup_iotlb call. Fix this by referring to the address space identifier
and the group to asid mapping to determine which mapping needs to be
updated. We also move the address space logic from mlx5 net to core
directory.

Reported-by: Eugenio Pérez <eperezma@redhat.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20221114131759.57883-6-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>

authored by

Eli Cohen and committed by
Michael S. Tsirkin
38fc462f 0dbc1b4a

+39 -59
+3 -2
drivers/vdpa/mlx5/core/mlx5_vdpa.h
··· 116 116 int inlen); 117 117 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey); 118 118 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, 119 - bool *change_map); 120 - int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb); 119 + bool *change_map, unsigned int asid); 120 + int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, 121 + unsigned int asid); 121 122 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev); 122 123 123 124 #define mlx5_vdpa_warn(__dev, format, ...) \
+26 -18
drivers/vdpa/mlx5/core/mr.c
··· 511 511 mutex_unlock(&mr->mkey_mtx); 512 512 } 513 513 514 - static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) 514 + static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, 515 + struct vhost_iotlb *iotlb, unsigned int asid) 515 516 { 516 517 struct mlx5_vdpa_mr *mr = &mvdev->mr; 517 518 int err; ··· 520 519 if (mr->initialized) 521 520 return 0; 522 521 523 - if (iotlb) 524 - err = create_user_mr(mvdev, iotlb); 525 - else 526 - err = create_dma_mr(mvdev, mr); 522 + if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { 523 + if (iotlb) 524 + err = create_user_mr(mvdev, iotlb); 525 + else 526 + err = create_dma_mr(mvdev, mr); 527 527 528 - if (err) 529 - return err; 528 + if (err) 529 + return err; 530 + } 530 531 531 - err = dup_iotlb(mvdev, iotlb); 532 - if (err) 533 - goto out_err; 532 + if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) { 533 + err = dup_iotlb(mvdev, iotlb); 534 + if (err) 535 + goto out_err; 536 + } 534 537 535 538 mr->initialized = true; 536 539 return 0; 537 540 538 541 out_err: 539 - if (iotlb) 540 - destroy_user_mr(mvdev, mr); 541 - else 542 - destroy_dma_mr(mvdev, mr); 542 + if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { 543 + if (iotlb) 544 + destroy_user_mr(mvdev, mr); 545 + else 546 + destroy_dma_mr(mvdev, mr); 547 + } 543 548 544 549 return err; 545 550 } 546 551 547 - int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) 552 + int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, 553 + unsigned int asid) 548 554 { 549 555 int err; 550 556 551 557 mutex_lock(&mvdev->mr.mkey_mtx); 552 - err = _mlx5_vdpa_create_mr(mvdev, iotlb); 558 + err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid); 553 559 mutex_unlock(&mvdev->mr.mkey_mtx); 554 560 return err; 555 561 } 556 562 557 563 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, 558 - bool *change_map) 564 + bool *change_map, unsigned int asid) 559 565 { 560 566 struct mlx5_vdpa_mr *mr = &mvdev->mr; 561 567 int err = 0; ··· 574 566 *change_map = true; 575 567 } 576 568 if (!*change_map) 577 - err = _mlx5_vdpa_create_mr(mvdev, iotlb); 569 + err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid); 578 570 mutex_unlock(&mr->mkey_mtx); 579 571 580 572 return err;
+10 -39
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 2394 2394 } 2395 2395 } 2396 2396 2397 - static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) 2397 + static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, 2398 + struct vhost_iotlb *iotlb, unsigned int asid) 2398 2399 { 2399 2400 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); 2400 2401 int err; ··· 2407 2406 2408 2407 teardown_driver(ndev); 2409 2408 mlx5_vdpa_destroy_mr(mvdev); 2410 - err = mlx5_vdpa_create_mr(mvdev, iotlb); 2409 + err = mlx5_vdpa_create_mr(mvdev, iotlb, asid); 2411 2410 if (err) 2412 2411 goto err_mr; 2413 2412 ··· 2588 2587 ++mvdev->generation; 2589 2588 2590 2589 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { 2591 - if (mlx5_vdpa_create_mr(mvdev, NULL)) 2590 + if (mlx5_vdpa_create_mr(mvdev, NULL, 0)) 2592 2591 mlx5_vdpa_warn(mvdev, "create MR failed\n"); 2593 2592 } 2594 2593 up_write(&ndev->reslock); ··· 2624 2623 return mvdev->generation; 2625 2624 } 2626 2625 2627 - static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) 2628 - { 2629 - u64 start = 0ULL, last = 0ULL - 1; 2630 - struct vhost_iotlb_map *map; 2631 - int err = 0; 2632 - 2633 - spin_lock(&mvdev->cvq.iommu_lock); 2634 - vhost_iotlb_reset(mvdev->cvq.iotlb); 2635 - 2636 - for (map = vhost_iotlb_itree_first(iotlb, start, last); map; 2637 - map = vhost_iotlb_itree_next(map, start, last)) { 2638 - err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, 2639 - map->last, map->addr, map->perm); 2640 - if (err) 2641 - goto out; 2642 - } 2643 - 2644 - out: 2645 - spin_unlock(&mvdev->cvq.iommu_lock); 2646 - return err; 2647 - } 2648 - 2649 - static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) 2626 + static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, 2627 + unsigned int asid) 2650 2628 { 2651 2629 bool change_map; 2652 2630 int err; 2653 2631 2654 - err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map); 2632 + err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid); 2655 2633 if (err) { 2656 2634 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err); 2657 2635 return err; 2658 2636 } 2659 2637 2660 2638 if (change_map) 2661 - err = mlx5_vdpa_change_map(mvdev, iotlb); 2639 + err = mlx5_vdpa_change_map(mvdev, iotlb, asid); 2662 2640 return err; 2663 2641 } ··· 2650 2670 int err = -EINVAL; 2651 2671 2652 2672 down_write(&ndev->reslock); 2653 - if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) { 2654 - err = set_map_data(mvdev, iotlb); 2655 - if (err) 2656 - goto out; 2657 - } 2658 - 2659 - if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) 2660 - err = set_map_control(mvdev, iotlb); 2661 - 2662 - out: 2673 + err = set_map_data(mvdev, iotlb, asid); 2663 2674 up_write(&ndev->reslock); 2664 2675 return err; 2665 2676 } ··· 3153 3182 goto err_mpfs; 3154 3183 3155 3184 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { 3156 - err = mlx5_vdpa_create_mr(mvdev, NULL); 3185 + err = mlx5_vdpa_create_mr(mvdev, NULL, 0); 3157 3186 if (err) 3158 3187 goto err_res; 3159 3188 }