irqchip/gic-v3-its: Add VPE irq domain allocation/teardown

When creating a VM, the low level GICv4 code is responsible for:
- allocating each VPE a unique VPEID
- allocating a doorbell interrupt for each VPE
- allocating the pending tables for each VPE
- allocating the property table for the VM

This of course has to be reversed when the VM is brought down.

All of this is wired into the irq domain alloc/free methods.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>

 drivers/irqchip/irq-gic-v3-its.c | 169 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 169 insertions(+)
··· 148 148 #define ITS_LIST_MAX 16 149 149 150 150 static unsigned long its_list_map; 151 + static DEFINE_IDA(its_vpeid_ida); 151 152 152 153 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 153 154 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) ··· 1256 1255 return prop_page; 1257 1256 } 1258 1257 1258 + static void its_free_prop_table(struct page *prop_page) 1259 + { 1260 + free_pages((unsigned long)page_address(prop_page), 1261 + get_order(LPI_PROPBASE_SZ)); 1262 + } 1259 1263 1260 1264 static int __init its_alloc_lpi_tables(void) 1261 1265 { ··· 1563 1557 return pend_page; 1564 1558 } 1565 1559 1560 + static void its_free_pending_table(struct page *pt) 1561 + { 1562 + free_pages((unsigned long)page_address(pt), 1563 + get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); 1564 + } 1565 + 1566 1566 static void its_cpu_init_lpis(void) 1567 1567 { 1568 1568 void __iomem *rbase = gic_data_rdist_rd_base(); ··· 1789 1777 return (ilog2(dev_id) < its->device_ids); 1790 1778 1791 1779 return its_alloc_table_entry(baser, dev_id); 1780 + } 1781 + 1782 + static bool its_alloc_vpe_table(u32 vpe_id) 1783 + { 1784 + struct its_node *its; 1785 + 1786 + /* 1787 + * Make sure the L2 tables are allocated on *all* v4 ITSs. We 1788 + * could try and only do it on ITSs corresponding to devices 1789 + * that have interrupts targeted at this VPE, but the 1790 + * complexity becomes crazy (and you have tons of memory 1791 + * anyway, right?). 
1792 + */ 1793 + list_for_each_entry(its, &its_nodes, entry) { 1794 + struct its_baser *baser; 1795 + 1796 + if (!its->is_v4) 1797 + continue; 1798 + 1799 + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); 1800 + if (!baser) 1801 + return false; 1802 + 1803 + if (!its_alloc_table_entry(baser, vpe_id)) 1804 + return false; 1805 + } 1806 + 1807 + return true; 1792 1808 } 1793 1809 1794 1810 static struct its_device *its_create_device(struct its_node *its, u32 dev_id, ··· 2076 2036 .name = "GICv4-vpe", 2077 2037 }; 2078 2038 2039 + static int its_vpe_id_alloc(void) 2040 + { 2041 + return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); 2042 + } 2043 + 2044 + static void its_vpe_id_free(u16 id) 2045 + { 2046 + ida_simple_remove(&its_vpeid_ida, id); 2047 + } 2048 + 2049 + static int its_vpe_init(struct its_vpe *vpe) 2050 + { 2051 + struct page *vpt_page; 2052 + int vpe_id; 2053 + 2054 + /* Allocate vpe_id */ 2055 + vpe_id = its_vpe_id_alloc(); 2056 + if (vpe_id < 0) 2057 + return vpe_id; 2058 + 2059 + /* Allocate VPT */ 2060 + vpt_page = its_allocate_pending_table(GFP_KERNEL); 2061 + if (!vpt_page) { 2062 + its_vpe_id_free(vpe_id); 2063 + return -ENOMEM; 2064 + } 2065 + 2066 + if (!its_alloc_vpe_table(vpe_id)) { 2067 + its_vpe_id_free(vpe_id); 2068 + its_free_pending_table(vpe->vpt_page); 2069 + return -ENOMEM; 2070 + } 2071 + 2072 + vpe->vpe_id = vpe_id; 2073 + vpe->vpt_page = vpt_page; 2074 + 2075 + return 0; 2076 + } 2077 + 2078 + static void its_vpe_teardown(struct its_vpe *vpe) 2079 + { 2080 + its_vpe_id_free(vpe->vpe_id); 2081 + its_free_pending_table(vpe->vpt_page); 2082 + } 2083 + 2084 + static void its_vpe_irq_domain_free(struct irq_domain *domain, 2085 + unsigned int virq, 2086 + unsigned int nr_irqs) 2087 + { 2088 + struct its_vm *vm = domain->host_data; 2089 + int i; 2090 + 2091 + irq_domain_free_irqs_parent(domain, virq, nr_irqs); 2092 + 2093 + for (i = 0; i < nr_irqs; i++) { 2094 + struct irq_data *data = irq_domain_get_irq_data(domain, 2095 + virq 
+ i); 2096 + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); 2097 + 2098 + BUG_ON(vm != vpe->its_vm); 2099 + 2100 + clear_bit(data->hwirq, vm->db_bitmap); 2101 + its_vpe_teardown(vpe); 2102 + irq_domain_reset_irq_data(data); 2103 + } 2104 + 2105 + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { 2106 + its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); 2107 + its_free_prop_table(vm->vprop_page); 2108 + } 2109 + } 2110 + 2111 + static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 2112 + unsigned int nr_irqs, void *args) 2113 + { 2114 + struct its_vm *vm = args; 2115 + unsigned long *bitmap; 2116 + struct page *vprop_page; 2117 + int base, nr_ids, i, err = 0; 2118 + 2119 + BUG_ON(!vm); 2120 + 2121 + bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); 2122 + if (!bitmap) 2123 + return -ENOMEM; 2124 + 2125 + if (nr_ids < nr_irqs) { 2126 + its_lpi_free_chunks(bitmap, base, nr_ids); 2127 + return -ENOMEM; 2128 + } 2129 + 2130 + vprop_page = its_allocate_prop_table(GFP_KERNEL); 2131 + if (!vprop_page) { 2132 + its_lpi_free_chunks(bitmap, base, nr_ids); 2133 + return -ENOMEM; 2134 + } 2135 + 2136 + vm->db_bitmap = bitmap; 2137 + vm->db_lpi_base = base; 2138 + vm->nr_db_lpis = nr_ids; 2139 + vm->vprop_page = vprop_page; 2140 + 2141 + for (i = 0; i < nr_irqs; i++) { 2142 + vm->vpes[i]->vpe_db_lpi = base + i; 2143 + err = its_vpe_init(vm->vpes[i]); 2144 + if (err) 2145 + break; 2146 + err = its_irq_gic_domain_alloc(domain, virq + i, 2147 + vm->vpes[i]->vpe_db_lpi); 2148 + if (err) 2149 + break; 2150 + irq_domain_set_hwirq_and_chip(domain, virq + i, i, 2151 + &its_vpe_irq_chip, vm->vpes[i]); 2152 + set_bit(i, bitmap); 2153 + } 2154 + 2155 + if (err) { 2156 + if (i > 0) 2157 + its_vpe_irq_domain_free(domain, virq, i - 1); 2158 + 2159 + its_lpi_free_chunks(bitmap, base, nr_ids); 2160 + its_free_prop_table(vprop_page); 2161 + } 2162 + 2163 + return err; 2164 + } 2165 + 2079 2166 static const struct irq_domain_ops 
its_vpe_domain_ops = { 2167 + .alloc = its_vpe_irq_domain_alloc, 2168 + .free = its_vpe_irq_domain_free, 2080 2169 }; 2081 2170 2082 2171 static int its_force_quiescent(void __iomem *base)