Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell. All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	return true;
}

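/*
 * Workqueue handler: inject the interrupt for a signalled irqfd.  A plain
 * irqfd models an edge-triggered interrupt, so the GSI is asserted and then
 * immediately de-asserted; a resampler irqfd leaves the line asserted and
 * relies on irqfd_resampler_ack() to de-assert it when the guest acks.
 */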
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in the assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

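/*
 * Default (weak) implementation for architectures that cannot inject the
 * interrupt from atomic context: returning -EWOULDBLOCK makes irqfd_wakeup()
 * fall back to the irqfd_inject() workqueue path.
 */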
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & EPOLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long iflags;

		spin_lock_irqsave(&kvm->irqfds.lock, iflags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
	}

	return 0;
}

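/*
 * poll_table callback used by vfs_poll(): hook irqfd->wait onto the
 * eventfd's wait queue so that irqfd_wakeup() runs whenever the eventfd
 * is signalled.
 */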
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

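/*
 * Wire up a new irqfd: take a reference on the eventfd (and, for resampling
 * irqfds, on the resample eventfd), attach to the per-VM resampler list if
 * needed, register our wait-queue callback, and finally poll the eventfd
 * once in case it was already signalled before we were attached.
 */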
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	if (!kvm_arch_irqfd_allowed(kvm, args))
		return -EINVAL;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler),
					    GFP_KERNEL_ACCOUNT);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &irqfd->pt);

	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP
	 */
	fdput(f);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

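/*
 * Return true if an irq ack notifier is currently registered for the GSI
 * that the given irqchip/pin maps to.
 */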
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

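/*
 * Initialize the per-VM eventfd state: the irqfd bookkeeping (when
 * CONFIG_HAVE_KVM_IRQFD is enabled) and the ioeventfd list.
 */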
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

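/*
 * KVM_IRQFD ioctl handler: dispatch to assign or deassign based on the
 * flags supplied by userspace.
 *
 * Illustrative userspace sketch (not part of this file; the field values
 * below are hypothetical):
 *
 *	struct kvm_irqfd irqfd = {
 *		.fd  = event_fd,	// eventfd to listen on
 *		.gsi = 24,		// guest interrupt line to raise
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 */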
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

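/*
 * Illustrative userspace sketch (not part of this file; addresses and
 * values are hypothetical): a doorbell register at a guest-physical
 * address can be wired to an eventfd via the KVM_IOEVENTFD ioctl, so a
 * guest write signals the eventfd instead of exiting to userspace.
 *
 *	struct kvm_ioeventfd io = {
 *		.addr      = 0xfe000000,	// guest-physical doorbell address
 *		.len       = 4,			// match 4-byte writes
 *		.fd        = event_fd,		// eventfd to signal
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = 0x1,		// only this written value triggers
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &io);
 */
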
struct _ioeventfd {
	struct list_head list;
	u64 addr;
	int length;
	struct eventfd_ctx *eventfd;
	u64 datamatch;
	struct kvm_io_device dev;
	u8 bus_idx;
	bool wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

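/*
 * Decide whether a guest write hits this ioeventfd: the address must match
 * exactly; a zero-length registration matches any width, otherwise the
 * length must match too; and unless the entry is a wildcard, the written
 * value must equal datamatch.
 */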
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

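/*
 * Register a single ioeventfd on the given bus: take a reference on the
 * eventfd, reject duplicates of an existing registration, and attach the
 * device to the KVM I/O bus so guest writes reach ioeventfd_write().
 */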
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{

	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	struct kvm_io_bus *bus;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}