irqchip/gic-v3-its: Add VLPI map/unmap operations

In order to let a VLPI be injected into a guest, the VLPI must
first be mapped using the VMAPTI command. When the VLPI is moved
to a different vcpu, it must be moved with the VMOVI command.

These commands are issued via the irq_set_vcpu_affinity method,
making sure we unmap the corresponding host LPI first.

The reverse is also done when the VLPI is unmapped from the guest.
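
For illustration, a minimal caller-side sketch of the resulting
interface, assuming the its_cmd_info/its_vlpi_map definitions from the
GICv4 header introduced earlier in this series (the example_* helpers
are hypothetical and not part of this patch):

	/* Hypothetical helper: forward a host LPI to a guest as a VLPI */
	static int example_map_vlpi(int irq, struct its_vlpi_map *map)
	{
		struct its_cmd_info info = {
			.cmd_type	= MAP_VLPI,
			.map		= map,
		};

		/* Lands in its_irq_set_vcpu_affinity() -> its_vlpi_map() */
		return irq_set_vcpu_affinity(irq, &info);
	}

	/* Hypothetical helper: give the LPI back to the host */
	static int example_unmap_vlpi(int irq)
	{
		/* A NULL vcpu_info is interpreted as an unmap request */
		return irq_set_vcpu_affinity(irq, NULL);
	}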

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>

+247 -3
drivers/irqchip/irq-gic-v3-its.c
···
 	u16			*col_map;
 	irq_hw_number_t		lpi_base;
 	int			nr_lpis;
+	struct mutex		vlpi_lock;
+	struct its_vm		*vm;
+	struct its_vlpi_map	*vlpi_maps;
+	int			nr_vlpis;
 };
 
 /*
- * The ITS view of a device - belongs to an ITS, a collection, owns an
- * interrupt translation table, and a list of interrupts.
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts. If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
 */
struct its_device {
 	struct list_head	entry;
···
 		struct {
 			struct its_collection	*col;
 		} its_invall_cmd;
+
+		struct {
+			struct its_vpe		*vpe;
+			struct its_device	*dev;
+			u32			virt_id;
+			u32			event_id;
+			bool			db_enabled;
+		} its_vmapti_cmd;
+
+		struct {
+			struct its_vpe		*vpe;
+			struct its_device	*dev;
+			u32			event_id;
+			bool			db_enabled;
+		} its_vmovi_cmd;
 	};
 };
···
 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
 						    struct its_cmd_desc *);
+
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+					      struct its_cmd_desc *);
 
 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
 {
···
 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 {
 	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
+}
+
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
 }
 
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
···
 	return NULL;
 }
 
+static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+					    struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmapti_cmd.db_enabled)
+		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmapti_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+					   struct its_cmd_desc *desc)
+{
+	u32 db;
+
+	if (desc->its_vmovi_cmd.db_enabled)
+		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+	else
+		db = 1023;
+
+	its_encode_cmd(cmd, GITS_CMD_VMOVI);
+	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+	its_encode_db_phys_id(cmd, db);
+	its_encode_db_valid(cmd, true);
+
+	its_fixup_cmd(cmd);
+
+	return desc->its_vmovi_cmd.vpe;
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
 				 struct its_cmd_block *ptr)
 {
···
 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
 			     struct its_collection, its_build_sync_cmd)
 
+static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+				struct its_vpe *sync_vpe)
+{
+	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+	its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+			     struct its_vpe, its_build_vsync_cmd)
+
 static void its_send_int(struct its_device *dev, u32 event_id)
 {
 	struct its_cmd_desc desc;
···
 	desc.its_invall_cmd.col = col;
 
 	its_send_single_command(its, its_build_invall_cmd, &desc);
+}
+
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmapti_cmd.vpe = map->vpe;
+	desc.its_vmapti_cmd.dev = dev;
+	desc.its_vmapti_cmd.virt_id = map->vintid;
+	desc.its_vmapti_cmd.event_id = id;
+	desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_cmd_desc desc;
+
+	desc.its_vmovi_cmd.vpe = map->vpe;
+	desc.its_vmovi_cmd.dev = dev;
+	desc.its_vmovi_cmd.event_id = id;
+	desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
 }
 
 /*
···
 	return 0;
 }
 
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	if (!info->map)
+		return -EINVAL;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm) {
+		struct its_vlpi_map *maps;
+
+		maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
+			       GFP_KERNEL);
+		if (!maps) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		its_dev->event_map.vm = info->map->vm;
+		its_dev->event_map.vlpi_maps = maps;
+	} else if (its_dev->event_map.vm != info->map->vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Get our private copy of the mapping information */
+	its_dev->event_map.vlpi_maps[event] = *info->map;
+
+	if (irqd_is_forwarded_to_vcpu(d)) {
+		/* Already mapped, move it around */
+		its_send_vmovi(its_dev, event);
+	} else {
+		/* Drop the physical mapping */
+		its_send_discard(its_dev, event);
+
+		/* and install the virtual one */
+		its_send_vmapti(its_dev, event);
+		irqd_set_forwarded_to_vcpu(d);
+
+		/* Increment the number of VLPIs */
+		its_dev->event_map.nr_vlpis++;
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm ||
+	    !its_dev->event_map.vlpi_maps[event].vm) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Copy our mapping information to the incoming request */
+	*info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	u32 event = its_get_event_id(d);
+	int ret = 0;
+
+	mutex_lock(&its_dev->event_map.vlpi_lock);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Drop the virtual mapping */
+	its_send_discard(its_dev, event);
+
+	/* and restore the physical one */
+	irqd_clr_forwarded_to_vcpu(d);
+	its_send_mapti(its_dev, d->hwirq, event);
+	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+				    LPI_PROP_ENABLED |
+				    LPI_PROP_GROUP1));
+
+	/*
+	 * Drop the refcount and make the device available again if
+	 * this was the last VLPI.
+	 */
+	if (!--its_dev->event_map.nr_vlpis) {
+		its_dev->event_map.vm = NULL;
+		kfree(its_dev->event_map.vlpi_maps);
+	}
+
+out:
+	mutex_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_cmd_info *info = vcpu_info;
 
 	/* Need a v4 ITS */
-	if (!its_dev->its->is_v4 || !info)
+	if (!its_dev->its->is_v4)
 		return -EINVAL;
+
+	/* Unmap request? */
+	if (!info)
+		return its_vlpi_unmap(d);
 
 	switch (info->cmd_type) {
 	case MAP_VLPI:
+		return its_vlpi_map(d, info);
 
 	case GET_VLPI:
+		return its_vlpi_get(d, info);
 
 	case PROP_UPDATE_VLPI:
 	case PROP_UPDATE_AND_INV_VLPI:
···
 	dev->event_map.col_map = col_map;
 	dev->event_map.lpi_base = lpi_base;
 	dev->event_map.nr_lpis = nr_lpis;
+	mutex_init(&dev->event_map.vlpi_lock);
 	dev->device_id = dev_id;
 	INIT_LIST_HEAD(&dev->entry);
···
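
For reference, the new its_encode_*() accessors above all funnel
through the driver's pre-existing its_mask_encode() helper, which packs
a value into bits [h:l] of one 64-bit word of a 32-byte ITS command.
The body below is a sketch of that helper as it stands in the driver
around this patch; verify against the tree you are working on. Note
also that a doorbell ID of 1023 is the architectural spurious INTID,
which the ITS interprets as "no doorbell".

	/* Sketch of the existing helper used by the encoders above */
	static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
	{
		u64 mask = GENMASK_ULL(h, l);	/* bits [h:l] set */

		/* Clear the field, then insert the (truncated) value */
		*raw_cmd &= ~mask;
		*raw_cmd |= (val << l) & mask;

		/*
		 * Example: its_encode_vpeid(cmd, vpe_id) places the vPE ID
		 * in bits [47:32] of the second command word, raw_cmd[1].
		 */
	}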