Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-5.7-rc1b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull more xen updates from Juergen Gross:

- two cleanups

- fix a boot regression introduced in this merge window

- fix wrong use of memory allocation flags

* tag 'for-linus-5.7-rc1b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
x86/xen: fix booting 32-bit pv guest
x86/xen: make xen_pvmmu_arch_setup() static
xen/blkfront: fix memory allocation flags in blkfront_setup_indirect()
xen: Use evtchn_port_t as a type for event channels

+142 -123
+1 -1
arch/x86/xen/setup.c
··· 985 985 #endif /* CONFIG_X86_64 */ 986 986 } 987 987 988 - void __init xen_pvmmu_arch_setup(void) 988 + static void __init xen_pvmmu_arch_setup(void) 989 989 { 990 990 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); 991 991 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
+1 -1
arch/x86/xen/xen-head.S
··· 38 38 #ifdef CONFIG_X86_64 39 39 mov initial_stack(%rip), %rsp 40 40 #else 41 - mov pa(initial_stack), %esp 41 + mov initial_stack, %esp 42 42 #endif 43 43 44 44 #ifdef CONFIG_X86_64
+12 -5
drivers/block/xen-blkfront.c
··· 47 47 #include <linux/bitmap.h> 48 48 #include <linux/list.h> 49 49 #include <linux/workqueue.h> 50 + #include <linux/sched/mm.h> 50 51 51 52 #include <xen/xen.h> 52 53 #include <xen/xenbus.h> ··· 2190 2189 2191 2190 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) 2192 2191 { 2193 - unsigned int psegs, grants; 2192 + unsigned int psegs, grants, memflags; 2194 2193 int err, i; 2195 2194 struct blkfront_info *info = rinfo->dev_info; 2195 + 2196 + memflags = memalloc_noio_save(); 2196 2197 2197 2198 if (info->max_indirect_segments == 0) { 2198 2199 if (!HAS_EXTRA_REQ) ··· 2227 2224 2228 2225 BUG_ON(!list_empty(&rinfo->indirect_pages)); 2229 2226 for (i = 0; i < num; i++) { 2230 - struct page *indirect_page = alloc_page(GFP_NOIO); 2227 + struct page *indirect_page = alloc_page(GFP_KERNEL); 2231 2228 if (!indirect_page) 2232 2229 goto out_of_memory; 2233 2230 list_add(&indirect_page->lru, &rinfo->indirect_pages); ··· 2238 2235 rinfo->shadow[i].grants_used = 2239 2236 kvcalloc(grants, 2240 2237 sizeof(rinfo->shadow[i].grants_used[0]), 2241 - GFP_NOIO); 2238 + GFP_KERNEL); 2242 2239 rinfo->shadow[i].sg = kvcalloc(psegs, 2243 2240 sizeof(rinfo->shadow[i].sg[0]), 2244 - GFP_NOIO); 2241 + GFP_KERNEL); 2245 2242 if (info->max_indirect_segments) 2246 2243 rinfo->shadow[i].indirect_grants = 2247 2244 kvcalloc(INDIRECT_GREFS(grants), 2248 2245 sizeof(rinfo->shadow[i].indirect_grants[0]), 2249 - GFP_NOIO); 2246 + GFP_KERNEL); 2250 2247 if ((rinfo->shadow[i].grants_used == NULL) || 2251 2248 (rinfo->shadow[i].sg == NULL) || 2252 2249 (info->max_indirect_segments && ··· 2255 2252 sg_init_table(rinfo->shadow[i].sg, psegs); 2256 2253 } 2257 2254 2255 + memalloc_noio_restore(memflags); 2258 2256 2259 2257 return 0; 2260 2258 ··· 2275 2271 __free_page(indirect_page); 2276 2272 } 2277 2273 } 2274 + 2275 + memalloc_noio_restore(memflags); 2276 + 2278 2277 return -ENOMEM; 2279 2278 } 2280 2279
+8 -8
drivers/xen/events/events_2l.c
··· 53 53 set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); 54 54 } 55 55 56 - static void evtchn_2l_clear_pending(unsigned port) 56 + static void evtchn_2l_clear_pending(evtchn_port_t port) 57 57 { 58 58 struct shared_info *s = HYPERVISOR_shared_info; 59 59 sync_clear_bit(port, BM(&s->evtchn_pending[0])); 60 60 } 61 61 62 - static void evtchn_2l_set_pending(unsigned port) 62 + static void evtchn_2l_set_pending(evtchn_port_t port) 63 63 { 64 64 struct shared_info *s = HYPERVISOR_shared_info; 65 65 sync_set_bit(port, BM(&s->evtchn_pending[0])); 66 66 } 67 67 68 - static bool evtchn_2l_is_pending(unsigned port) 68 + static bool evtchn_2l_is_pending(evtchn_port_t port) 69 69 { 70 70 struct shared_info *s = HYPERVISOR_shared_info; 71 71 return sync_test_bit(port, BM(&s->evtchn_pending[0])); 72 72 } 73 73 74 - static bool evtchn_2l_test_and_set_mask(unsigned port) 74 + static bool evtchn_2l_test_and_set_mask(evtchn_port_t port) 75 75 { 76 76 struct shared_info *s = HYPERVISOR_shared_info; 77 77 return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0])); 78 78 } 79 79 80 - static void evtchn_2l_mask(unsigned port) 80 + static void evtchn_2l_mask(evtchn_port_t port) 81 81 { 82 82 struct shared_info *s = HYPERVISOR_shared_info; 83 83 sync_set_bit(port, BM(&s->evtchn_mask[0])); 84 84 } 85 85 86 - static void evtchn_2l_unmask(unsigned port) 86 + static void evtchn_2l_unmask(evtchn_port_t port) 87 87 { 88 88 struct shared_info *s = HYPERVISOR_shared_info; 89 89 unsigned int cpu = get_cpu(); ··· 173 173 /* Timer interrupt has highest priority. 
*/ 174 174 irq = irq_from_virq(cpu, VIRQ_TIMER); 175 175 if (irq != -1) { 176 - unsigned int evtchn = evtchn_from_irq(irq); 176 + evtchn_port_t evtchn = evtchn_from_irq(irq); 177 177 word_idx = evtchn / BITS_PER_LONG; 178 178 bit_idx = evtchn % BITS_PER_LONG; 179 179 if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx)) ··· 228 228 229 229 do { 230 230 xen_ulong_t bits; 231 - int port; 231 + evtchn_port_t port; 232 232 233 233 bits = MASK_LSBS(pending_bits, bit_idx); 234 234
+48 -45
drivers/xen/events/events_base.c
··· 116 116 } 117 117 } 118 118 119 - static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) 119 + static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) 120 120 { 121 121 unsigned row; 122 122 unsigned col; ··· 143 143 return 0; 144 144 } 145 145 146 - int get_evtchn_to_irq(unsigned evtchn) 146 + int get_evtchn_to_irq(evtchn_port_t evtchn) 147 147 { 148 148 if (evtchn >= xen_evtchn_max_channels()) 149 149 return -1; ··· 162 162 static int xen_irq_info_common_setup(struct irq_info *info, 163 163 unsigned irq, 164 164 enum xen_irq_type type, 165 - unsigned evtchn, 165 + evtchn_port_t evtchn, 166 166 unsigned short cpu) 167 167 { 168 168 int ret; ··· 184 184 } 185 185 186 186 static int xen_irq_info_evtchn_setup(unsigned irq, 187 - unsigned evtchn) 187 + evtchn_port_t evtchn) 188 188 { 189 189 struct irq_info *info = info_for_irq(irq); 190 190 ··· 193 193 194 194 static int xen_irq_info_ipi_setup(unsigned cpu, 195 195 unsigned irq, 196 - unsigned evtchn, 196 + evtchn_port_t evtchn, 197 197 enum ipi_vector ipi) 198 198 { 199 199 struct irq_info *info = info_for_irq(irq); ··· 207 207 208 208 static int xen_irq_info_virq_setup(unsigned cpu, 209 209 unsigned irq, 210 - unsigned evtchn, 210 + evtchn_port_t evtchn, 211 211 unsigned virq) 212 212 { 213 213 struct irq_info *info = info_for_irq(irq); ··· 220 220 } 221 221 222 222 static int xen_irq_info_pirq_setup(unsigned irq, 223 - unsigned evtchn, 223 + evtchn_port_t evtchn, 224 224 unsigned pirq, 225 225 unsigned gsi, 226 226 uint16_t domid, ··· 245 245 /* 246 246 * Accessors for packed IRQ information. 
247 247 */ 248 - unsigned int evtchn_from_irq(unsigned irq) 248 + evtchn_port_t evtchn_from_irq(unsigned irq) 249 249 { 250 250 if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)) 251 251 return 0; ··· 253 253 return info_for_irq(irq)->evtchn; 254 254 } 255 255 256 - unsigned irq_from_evtchn(unsigned int evtchn) 256 + unsigned int irq_from_evtchn(evtchn_port_t evtchn) 257 257 { 258 258 return get_evtchn_to_irq(evtchn); 259 259 } ··· 304 304 return info_for_irq(irq)->cpu; 305 305 } 306 306 307 - unsigned int cpu_from_evtchn(unsigned int evtchn) 307 + unsigned int cpu_from_evtchn(evtchn_port_t evtchn) 308 308 { 309 309 int irq = get_evtchn_to_irq(evtchn); 310 310 unsigned ret = 0; ··· 330 330 return info->u.pirq.flags & PIRQ_NEEDS_EOI; 331 331 } 332 332 333 - static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) 333 + static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu) 334 334 { 335 - int irq = get_evtchn_to_irq(chn); 335 + int irq = get_evtchn_to_irq(evtchn); 336 336 struct irq_info *info = info_for_irq(irq); 337 337 338 338 BUG_ON(irq == -1); ··· 354 354 */ 355 355 void notify_remote_via_irq(int irq) 356 356 { 357 - int evtchn = evtchn_from_irq(irq); 357 + evtchn_port_t evtchn = evtchn_from_irq(irq); 358 358 359 359 if (VALID_EVTCHN(evtchn)) 360 360 notify_remote_via_evtchn(evtchn); ··· 445 445 irq_free_desc(irq); 446 446 } 447 447 448 - static void xen_evtchn_close(unsigned int port) 448 + static void xen_evtchn_close(evtchn_port_t port) 449 449 { 450 450 struct evtchn_close close; 451 451 ··· 472 472 473 473 static void eoi_pirq(struct irq_data *data) 474 474 { 475 - int evtchn = evtchn_from_irq(data->irq); 475 + evtchn_port_t evtchn = evtchn_from_irq(data->irq); 476 476 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; 477 477 int rc = 0; 478 478 ··· 508 508 { 509 509 struct evtchn_bind_pirq bind_pirq; 510 510 struct irq_info *info = info_for_irq(irq); 511 - int evtchn = evtchn_from_irq(irq); 511 + evtchn_port_t evtchn 
= evtchn_from_irq(irq); 512 512 int rc; 513 513 514 514 BUG_ON(info->type != IRQT_PIRQ); ··· 561 561 { 562 562 unsigned int irq = data->irq; 563 563 struct irq_info *info = info_for_irq(irq); 564 - unsigned evtchn = evtchn_from_irq(irq); 564 + evtchn_port_t evtchn = evtchn_from_irq(irq); 565 565 566 566 BUG_ON(info->type != IRQT_PIRQ); 567 567 ··· 601 601 602 602 static void __unbind_from_irq(unsigned int irq) 603 603 { 604 - int evtchn = evtchn_from_irq(irq); 604 + evtchn_port_t evtchn = evtchn_from_irq(irq); 605 605 struct irq_info *info = irq_get_handler_data(irq); 606 606 607 607 if (info->refcnt > 0) { ··· 827 827 } 828 828 EXPORT_SYMBOL_GPL(xen_pirq_from_irq); 829 829 830 - int bind_evtchn_to_irq(unsigned int evtchn) 830 + int bind_evtchn_to_irq(evtchn_port_t evtchn) 831 831 { 832 832 int irq; 833 833 int ret; ··· 870 870 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) 871 871 { 872 872 struct evtchn_bind_ipi bind_ipi; 873 - int evtchn, irq; 874 - int ret; 873 + evtchn_port_t evtchn; 874 + int ret, irq; 875 875 876 876 mutex_lock(&irq_mapping_update_lock); 877 877 ··· 909 909 } 910 910 911 911 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, 912 - unsigned int remote_port) 912 + evtchn_port_t remote_port) 913 913 { 914 914 struct evtchn_bind_interdomain bind_interdomain; 915 915 int err; ··· 924 924 } 925 925 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); 926 926 927 - static int find_virq(unsigned int virq, unsigned int cpu) 927 + static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) 928 928 { 929 929 struct evtchn_status status; 930 - int port, rc = -ENOENT; 930 + evtchn_port_t port; 931 + int rc = -ENOENT; 931 932 932 933 memset(&status, 0, sizeof(status)); 933 934 for (port = 0; port < xen_evtchn_max_channels(); port++) { ··· 940 939 if (status.status != EVTCHNSTAT_virq) 941 940 continue; 942 941 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { 943 - rc = port; 942 + *evtchn = 
port; 944 943 break; 945 944 } 946 945 } ··· 963 962 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) 964 963 { 965 964 struct evtchn_bind_virq bind_virq; 966 - int evtchn, irq, ret; 965 + evtchn_port_t evtchn = 0; 966 + int irq, ret; 967 967 968 968 mutex_lock(&irq_mapping_update_lock); 969 969 ··· 990 988 evtchn = bind_virq.port; 991 989 else { 992 990 if (ret == -EEXIST) 993 - ret = find_virq(virq, cpu); 991 + ret = find_virq(virq, cpu, &evtchn); 994 992 BUG_ON(ret < 0); 995 - evtchn = ret; 996 993 } 997 994 998 995 ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq); ··· 1020 1019 mutex_unlock(&irq_mapping_update_lock); 1021 1020 } 1022 1021 1023 - int bind_evtchn_to_irqhandler(unsigned int evtchn, 1022 + int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, 1024 1023 irq_handler_t handler, 1025 1024 unsigned long irqflags, 1026 1025 const char *devname, void *dev_id) ··· 1041 1040 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); 1042 1041 1043 1042 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, 1044 - unsigned int remote_port, 1043 + evtchn_port_t remote_port, 1045 1044 irq_handler_t handler, 1046 1045 unsigned long irqflags, 1047 1046 const char *devname, ··· 1133 1132 } 1134 1133 EXPORT_SYMBOL_GPL(xen_set_irq_priority); 1135 1134 1136 - int evtchn_make_refcounted(unsigned int evtchn) 1135 + int evtchn_make_refcounted(evtchn_port_t evtchn) 1137 1136 { 1138 1137 int irq = get_evtchn_to_irq(evtchn); 1139 1138 struct irq_info *info; ··· 1154 1153 } 1155 1154 EXPORT_SYMBOL_GPL(evtchn_make_refcounted); 1156 1155 1157 - int evtchn_get(unsigned int evtchn) 1156 + int evtchn_get(evtchn_port_t evtchn) 1158 1157 { 1159 1158 int irq; 1160 1159 struct irq_info *info; ··· 1187 1186 } 1188 1187 EXPORT_SYMBOL_GPL(evtchn_get); 1189 1188 1190 - void evtchn_put(unsigned int evtchn) 1189 + void evtchn_put(evtchn_port_t evtchn) 1191 1190 { 1192 1191 int irq = get_evtchn_to_irq(evtchn); 1193 1192 if (WARN_ON(irq == -1)) ··· 1253 1252 
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); 1254 1253 1255 1254 /* Rebind a new event channel to an existing irq. */ 1256 - void rebind_evtchn_irq(int evtchn, int irq) 1255 + void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) 1257 1256 { 1258 1257 struct irq_info *info = info_for_irq(irq); 1259 1258 ··· 1285 1284 } 1286 1285 1287 1286 /* Rebind an evtchn so that it gets delivered to a specific cpu */ 1288 - static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu) 1287 + static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) 1289 1288 { 1290 1289 struct evtchn_bind_vcpu bind_vcpu; 1291 1290 int masked; ··· 1343 1342 1344 1343 static void enable_dynirq(struct irq_data *data) 1345 1344 { 1346 - int evtchn = evtchn_from_irq(data->irq); 1345 + evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1347 1346 1348 1347 if (VALID_EVTCHN(evtchn)) 1349 1348 unmask_evtchn(evtchn); ··· 1351 1350 1352 1351 static void disable_dynirq(struct irq_data *data) 1353 1352 { 1354 - int evtchn = evtchn_from_irq(data->irq); 1353 + evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1355 1354 1356 1355 if (VALID_EVTCHN(evtchn)) 1357 1356 mask_evtchn(evtchn); ··· 1359 1358 1360 1359 static void ack_dynirq(struct irq_data *data) 1361 1360 { 1362 - int evtchn = evtchn_from_irq(data->irq); 1361 + evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1363 1362 1364 1363 if (!VALID_EVTCHN(evtchn)) 1365 1364 return; ··· 1386 1385 1387 1386 static int retrigger_dynirq(struct irq_data *data) 1388 1387 { 1389 - unsigned int evtchn = evtchn_from_irq(data->irq); 1388 + evtchn_port_t evtchn = evtchn_from_irq(data->irq); 1390 1389 int masked; 1391 1390 1392 1391 if (!VALID_EVTCHN(evtchn)) ··· 1441 1440 static void restore_cpu_virqs(unsigned int cpu) 1442 1441 { 1443 1442 struct evtchn_bind_virq bind_virq; 1444 - int virq, irq, evtchn; 1443 + evtchn_port_t evtchn; 1444 + int virq, irq; 1445 1445 1446 1446 for (virq = 0; virq < NR_VIRQS; virq++) { 1447 1447 if ((irq = 
per_cpu(virq_to_irq, cpu)[virq]) == -1) ··· 1467 1465 static void restore_cpu_ipis(unsigned int cpu) 1468 1466 { 1469 1467 struct evtchn_bind_ipi bind_ipi; 1470 - int ipi, irq, evtchn; 1468 + evtchn_port_t evtchn; 1469 + int ipi, irq; 1471 1470 1472 1471 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { 1473 1472 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) ··· 1492 1489 /* Clear an irq's pending state, in preparation for polling on it */ 1493 1490 void xen_clear_irq_pending(int irq) 1494 1491 { 1495 - int evtchn = evtchn_from_irq(irq); 1492 + evtchn_port_t evtchn = evtchn_from_irq(irq); 1496 1493 1497 1494 if (VALID_EVTCHN(evtchn)) 1498 1495 clear_evtchn(evtchn); ··· 1500 1497 EXPORT_SYMBOL(xen_clear_irq_pending); 1501 1498 void xen_set_irq_pending(int irq) 1502 1499 { 1503 - int evtchn = evtchn_from_irq(irq); 1500 + evtchn_port_t evtchn = evtchn_from_irq(irq); 1504 1501 1505 1502 if (VALID_EVTCHN(evtchn)) 1506 1503 set_evtchn(evtchn); ··· 1508 1505 1509 1506 bool xen_test_irq_pending(int irq) 1510 1507 { 1511 - int evtchn = evtchn_from_irq(irq); 1508 + evtchn_port_t evtchn = evtchn_from_irq(irq); 1512 1509 bool ret = false; 1513 1510 1514 1511 if (VALID_EVTCHN(evtchn)) ··· 1670 1667 void __init xen_init_IRQ(void) 1671 1668 { 1672 1669 int ret = -EINVAL; 1673 - unsigned int evtchn; 1670 + evtchn_port_t evtchn; 1674 1671 1675 1672 if (fifo_events) 1676 1673 ret = xen_evtchn_fifo_init();
+11 -11
drivers/xen/events/events_fifo.c
··· 82 82 83 83 #endif 84 84 85 - static inline event_word_t *event_word_from_port(unsigned port) 85 + static inline event_word_t *event_word_from_port(evtchn_port_t port) 86 86 { 87 87 unsigned i = port / EVENT_WORDS_PER_PAGE; 88 88 ··· 140 140 141 141 static int evtchn_fifo_setup(struct irq_info *info) 142 142 { 143 - unsigned port = info->evtchn; 143 + evtchn_port_t port = info->evtchn; 144 144 unsigned new_array_pages; 145 145 int ret; 146 146 ··· 191 191 /* no-op */ 192 192 } 193 193 194 - static void evtchn_fifo_clear_pending(unsigned port) 194 + static void evtchn_fifo_clear_pending(evtchn_port_t port) 195 195 { 196 196 event_word_t *word = event_word_from_port(port); 197 197 sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); 198 198 } 199 199 200 - static void evtchn_fifo_set_pending(unsigned port) 200 + static void evtchn_fifo_set_pending(evtchn_port_t port) 201 201 { 202 202 event_word_t *word = event_word_from_port(port); 203 203 sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); 204 204 } 205 205 206 - static bool evtchn_fifo_is_pending(unsigned port) 206 + static bool evtchn_fifo_is_pending(evtchn_port_t port) 207 207 { 208 208 event_word_t *word = event_word_from_port(port); 209 209 return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); 210 210 } 211 211 212 - static bool evtchn_fifo_test_and_set_mask(unsigned port) 212 + static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port) 213 213 { 214 214 event_word_t *word = event_word_from_port(port); 215 215 return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); 216 216 } 217 217 218 - static void evtchn_fifo_mask(unsigned port) 218 + static void evtchn_fifo_mask(evtchn_port_t port) 219 219 { 220 220 event_word_t *word = event_word_from_port(port); 221 221 sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); 222 222 } 223 223 224 - static bool evtchn_fifo_is_masked(unsigned port) 224 + static bool evtchn_fifo_is_masked(evtchn_port_t port) 225 225 { 226 226 
event_word_t *word = event_word_from_port(port); 227 227 return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); ··· 242 242 } while (w != old); 243 243 } 244 244 245 - static void evtchn_fifo_unmask(unsigned port) 245 + static void evtchn_fifo_unmask(evtchn_port_t port) 246 246 { 247 247 event_word_t *word = event_word_from_port(port); 248 248 ··· 270 270 return w & EVTCHN_FIFO_LINK_MASK; 271 271 } 272 272 273 - static void handle_irq_for_port(unsigned port) 273 + static void handle_irq_for_port(evtchn_port_t port) 274 274 { 275 275 int irq; 276 276 ··· 286 286 { 287 287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 288 288 uint32_t head; 289 - unsigned port; 289 + evtchn_port_t port; 290 290 event_word_t *word; 291 291 292 292 head = q->head[priority];
+15 -15
drivers/xen/events/events_internal.h
··· 33 33 int refcnt; 34 34 enum xen_irq_type type; /* type */ 35 35 unsigned irq; 36 - unsigned int evtchn; /* event channel */ 36 + evtchn_port_t evtchn; /* event channel */ 37 37 unsigned short cpu; /* cpu bound */ 38 38 39 39 union { ··· 60 60 int (*setup)(struct irq_info *info); 61 61 void (*bind_to_cpu)(struct irq_info *info, unsigned cpu); 62 62 63 - void (*clear_pending)(unsigned port); 64 - void (*set_pending)(unsigned port); 65 - bool (*is_pending)(unsigned port); 66 - bool (*test_and_set_mask)(unsigned port); 67 - void (*mask)(unsigned port); 68 - void (*unmask)(unsigned port); 63 + void (*clear_pending)(evtchn_port_t port); 64 + void (*set_pending)(evtchn_port_t port); 65 + bool (*is_pending)(evtchn_port_t port); 66 + bool (*test_and_set_mask)(evtchn_port_t port); 67 + void (*mask)(evtchn_port_t port); 68 + void (*unmask)(evtchn_port_t port); 69 69 70 70 void (*handle_events)(unsigned cpu); 71 71 void (*resume)(void); ··· 74 74 extern const struct evtchn_ops *evtchn_ops; 75 75 76 76 extern int **evtchn_to_irq; 77 - int get_evtchn_to_irq(unsigned int evtchn); 77 + int get_evtchn_to_irq(evtchn_port_t evtchn); 78 78 79 79 struct irq_info *info_for_irq(unsigned irq); 80 80 unsigned cpu_from_irq(unsigned irq); 81 - unsigned cpu_from_evtchn(unsigned int evtchn); 81 + unsigned int cpu_from_evtchn(evtchn_port_t evtchn); 82 82 83 83 static inline unsigned xen_evtchn_max_channels(void) 84 84 { ··· 102 102 evtchn_ops->bind_to_cpu(info, cpu); 103 103 } 104 104 105 - static inline void clear_evtchn(unsigned port) 105 + static inline void clear_evtchn(evtchn_port_t port) 106 106 { 107 107 evtchn_ops->clear_pending(port); 108 108 } 109 109 110 - static inline void set_evtchn(unsigned port) 110 + static inline void set_evtchn(evtchn_port_t port) 111 111 { 112 112 evtchn_ops->set_pending(port); 113 113 } 114 114 115 - static inline bool test_evtchn(unsigned port) 115 + static inline bool test_evtchn(evtchn_port_t port) 116 116 { 117 117 return 
evtchn_ops->is_pending(port); 118 118 } 119 119 120 - static inline bool test_and_set_mask(unsigned port) 120 + static inline bool test_and_set_mask(evtchn_port_t port) 121 121 { 122 122 return evtchn_ops->test_and_set_mask(port); 123 123 } 124 124 125 - static inline void mask_evtchn(unsigned port) 125 + static inline void mask_evtchn(evtchn_port_t port) 126 126 { 127 127 return evtchn_ops->mask(port); 128 128 } 129 129 130 - static inline void unmask_evtchn(unsigned port) 130 + static inline void unmask_evtchn(evtchn_port_t port) 131 131 { 132 132 return evtchn_ops->unmask(port); 133 133 }
+7 -6
drivers/xen/evtchn.c
··· 83 83 struct user_evtchn { 84 84 struct rb_node node; 85 85 struct per_user_data *user; 86 - unsigned port; 86 + evtchn_port_t port; 87 87 bool enabled; 88 88 }; 89 89 ··· 138 138 kfree(evtchn); 139 139 } 140 140 141 - static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port) 141 + static struct user_evtchn *find_evtchn(struct per_user_data *u, 142 + evtchn_port_t port) 142 143 { 143 144 struct rb_node *node = u->evtchns.rb_node; 144 145 ··· 164 163 struct per_user_data *u = evtchn->user; 165 164 166 165 WARN(!evtchn->enabled, 167 - "Interrupt for port %d, but apparently not enabled; per-user %p\n", 166 + "Interrupt for port %u, but apparently not enabled; per-user %p\n", 168 167 evtchn->port, u); 169 168 170 169 disable_irq_nosync(irq); ··· 287 286 mutex_lock(&u->bind_mutex); 288 287 289 288 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) { 290 - unsigned port = kbuf[i]; 289 + evtchn_port_t port = kbuf[i]; 291 290 struct user_evtchn *evtchn; 292 291 293 292 evtchn = find_evtchn(u, port); ··· 362 361 return 0; 363 362 } 364 363 365 - static int evtchn_bind_to_user(struct per_user_data *u, int port) 364 + static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port) 366 365 { 367 366 struct user_evtchn *evtchn; 368 367 struct evtchn_close close; ··· 424 423 425 424 static DEFINE_PER_CPU(int, bind_last_selected_cpu); 426 425 427 - static void evtchn_bind_interdom_next_vcpu(int evtchn) 426 + static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn) 428 427 { 429 428 unsigned int selected_cpu, irq; 430 429 struct irq_desc *desc;
+2 -1
drivers/xen/gntdev-common.h
··· 15 15 #include <linux/mman.h> 16 16 #include <linux/mmu_notifier.h> 17 17 #include <linux/types.h> 18 + #include <xen/interface/event_channel.h> 18 19 19 20 struct gntdev_dmabuf_priv; 20 21 ··· 39 38 int flags; 40 39 /* Address relative to the start of the gntdev_grant_map. */ 41 40 int addr; 42 - int event; 41 + evtchn_port_t event; 43 42 }; 44 43 45 44 struct gntdev_grant_map {
+1 -1
drivers/xen/gntdev.c
··· 652 652 struct gntdev_grant_map *map; 653 653 int rc; 654 654 int out_flags; 655 - unsigned int out_event; 655 + evtchn_port_t out_event; 656 656 657 657 if (copy_from_user(&op, u, sizeof(op))) 658 658 return -EFAULT;
+3 -2
drivers/xen/pvcalls-back.c
··· 300 300 struct pvcalls_fedata *fedata, 301 301 uint64_t id, 302 302 grant_ref_t ref, 303 - uint32_t evtchn, 303 + evtchn_port_t evtchn, 304 304 struct socket *sock) 305 305 { 306 306 int ret; ··· 905 905 906 906 static int backend_connect(struct xenbus_device *dev) 907 907 { 908 - int err, evtchn; 908 + int err; 909 + evtchn_port_t evtchn; 909 910 grant_ref_t ring_ref; 910 911 struct pvcalls_fedata *fedata = NULL; 911 912
+9 -6
drivers/xen/pvcalls-front.c
··· 368 368 return -ENOMEM; 369 369 } 370 370 371 - static int create_active(struct sock_mapping *map, int *evtchn) 371 + static int create_active(struct sock_mapping *map, evtchn_port_t *evtchn) 372 372 { 373 373 void *bytes; 374 374 int ret = -ENOMEM, irq = -1, i; 375 375 376 - *evtchn = -1; 376 + *evtchn = 0; 377 377 init_waitqueue_head(&map->active.inflight_conn_req); 378 378 379 379 bytes = map->active.data.in; ··· 404 404 return 0; 405 405 406 406 out_error: 407 - if (*evtchn >= 0) 407 + if (*evtchn > 0) 408 408 xenbus_free_evtchn(pvcalls_front_dev, *evtchn); 409 409 return ret; 410 410 } ··· 415 415 struct pvcalls_bedata *bedata; 416 416 struct sock_mapping *map = NULL; 417 417 struct xen_pvcalls_request *req; 418 - int notify, req_id, ret, evtchn; 418 + int notify, req_id, ret; 419 + evtchn_port_t evtchn; 419 420 420 421 if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) 421 422 return -EOPNOTSUPP; ··· 766 765 struct sock_mapping *map; 767 766 struct sock_mapping *map2 = NULL; 768 767 struct xen_pvcalls_request *req; 769 - int notify, req_id, ret, evtchn, nonblock; 768 + int notify, req_id, ret, nonblock; 769 + evtchn_port_t evtchn; 770 770 771 771 map = pvcalls_enter_sock(sock); 772 772 if (IS_ERR(map)) ··· 1127 1125 static int pvcalls_front_probe(struct xenbus_device *dev, 1128 1126 const struct xenbus_device_id *id) 1129 1127 { 1130 - int ret = -ENOMEM, evtchn, i; 1128 + int ret = -ENOMEM, i; 1129 + evtchn_port_t evtchn; 1131 1130 unsigned int max_page_order, function_calls, len; 1132 1131 char *versions; 1133 1132 grant_ref_t gref_head = 0;
+4 -3
drivers/xen/xen-pciback/xenbus.c
··· 105 105 } 106 106 107 107 static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref, 108 - int remote_evtchn) 108 + evtchn_port_t remote_evtchn) 109 109 { 110 110 int err = 0; 111 111 void *vaddr; 112 112 113 113 dev_dbg(&pdev->xdev->dev, 114 - "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n", 114 + "Attaching to frontend resources - gnt_ref=%d evtchn=%u\n", 115 115 gnt_ref, remote_evtchn); 116 116 117 117 err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr); ··· 142 142 static int xen_pcibk_attach(struct xen_pcibk_device *pdev) 143 143 { 144 144 int err = 0; 145 - int gnt_ref, remote_evtchn; 145 + int gnt_ref; 146 + evtchn_port_t remote_evtchn; 146 147 char *magic = NULL; 147 148 148 149
+2 -1
drivers/xen/xen-scsiback.c
··· 854 854 static int scsiback_map(struct vscsibk_info *info) 855 855 { 856 856 struct xenbus_device *dev = info->dev; 857 - unsigned int ring_ref, evtchn; 857 + unsigned int ring_ref; 858 + evtchn_port_t evtchn; 858 859 int err; 859 860 860 861 err = xenbus_gather(XBT_NIL, dev->otherend,
+3 -3
drivers/xen/xenbus/xenbus_client.c
··· 391 391 * error, the device will switch to XenbusStateClosing, and the error will be 392 392 * saved in the store. 393 393 */ 394 - int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) 394 + int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port) 395 395 { 396 396 struct evtchn_alloc_unbound alloc_unbound; 397 397 int err; ··· 414 414 /** 415 415 * Free an existing event channel. Returns 0 on success or -errno on error. 416 416 */ 417 - int xenbus_free_evtchn(struct xenbus_device *dev, int port) 417 + int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port) 418 418 { 419 419 struct evtchn_close close; 420 420 int err; ··· 423 423 424 424 err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); 425 425 if (err) 426 - xenbus_dev_error(dev, err, "freeing event channel %d", port); 426 + xenbus_dev_error(dev, err, "freeing event channel %u", port); 427 427 428 428 return err; 429 429 }
+11 -11
include/xen/events.h
··· 14 14 15 15 unsigned xen_evtchn_nr_channels(void); 16 16 17 - int bind_evtchn_to_irq(unsigned int evtchn); 18 - int bind_evtchn_to_irqhandler(unsigned int evtchn, 17 + int bind_evtchn_to_irq(evtchn_port_t evtchn); 18 + int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, 19 19 irq_handler_t handler, 20 20 unsigned long irqflags, const char *devname, 21 21 void *dev_id); ··· 31 31 const char *devname, 32 32 void *dev_id); 33 33 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, 34 - unsigned int remote_port); 34 + evtchn_port_t remote_port); 35 35 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, 36 - unsigned int remote_port, 36 + evtchn_port_t remote_port, 37 37 irq_handler_t handler, 38 38 unsigned long irqflags, 39 39 const char *devname, ··· 54 54 /* 55 55 * Allow extra references to event channels exposed to userspace by evtchn 56 56 */ 57 - int evtchn_make_refcounted(unsigned int evtchn); 58 - int evtchn_get(unsigned int evtchn); 59 - void evtchn_put(unsigned int evtchn); 57 + int evtchn_make_refcounted(evtchn_port_t evtchn); 58 + int evtchn_get(evtchn_port_t evtchn); 59 + void evtchn_put(evtchn_port_t evtchn); 60 60 61 61 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); 62 - void rebind_evtchn_irq(int evtchn, int irq); 62 + void rebind_evtchn_irq(evtchn_port_t evtchn, int irq); 63 63 int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu); 64 64 65 - static inline void notify_remote_via_evtchn(int port) 65 + static inline void notify_remote_via_evtchn(evtchn_port_t port) 66 66 { 67 67 struct evtchn_send send = { .port = port }; 68 68 (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); ··· 86 86 void xen_poll_irq_timeout(int irq, u64 timeout); 87 87 88 88 /* Determine the IRQ which is bound to an event channel */ 89 - unsigned irq_from_evtchn(unsigned int evtchn); 89 + unsigned int irq_from_evtchn(evtchn_port_t evtchn); 90 90 int irq_from_virq(unsigned int cpu, unsigned int virq); 91 - 
unsigned int evtchn_from_irq(unsigned irq); 91 + evtchn_port_t evtchn_from_irq(unsigned irq); 92 92 93 93 #ifdef CONFIG_XEN_PVHVM 94 94 /* Xen HVM evtchn vector callback */
+1 -1
include/xen/interface/event_channel.h
··· 220 220 #define EVTCHNOP_set_priority 13 221 221 struct evtchn_set_priority { 222 222 /* IN parameters. */ 223 - uint32_t port; 223 + evtchn_port_t port; 224 224 uint32_t priority; 225 225 }; 226 226
+3 -2
include/xen/xenbus.h
··· 47 47 #include <xen/interface/grant_table.h> 48 48 #include <xen/interface/io/xenbus.h> 49 49 #include <xen/interface/io/xs_wire.h> 50 + #include <xen/interface/event_channel.h> 50 51 51 52 #define XENBUS_MAX_RING_GRANT_ORDER 4 52 53 #define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER) ··· 213 212 214 213 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); 215 214 216 - int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); 217 - int xenbus_free_evtchn(struct xenbus_device *dev, int port); 215 + int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port); 216 + int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port); 218 217 219 218 enum xenbus_state xenbus_read_driver_state(const char *path); 220 219