Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.33-rc1 404 lines 10 kB view raw
/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 *
 * In-kernel emulation of an I/O APIC for KVM guests: the redirection
 * table is held in struct kvm_ioapic and is accessed by the guest via
 * MMIO through the indirect IOREGSEL/IOWIN register pair.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

/* Flip the #if to 1 to get verbose IOAPIC tracing via printk. */
#if 0
#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);

/*
 * Read the register currently selected by ioregsel.  Returns the 32-bit
 * register value; for redirection-table entries, ioregsel's low bit
 * selects the low or high half of the 64-bit entry.
 * Note: the addr and length parameters are unused here; the caller has
 * already resolved the access to the IOWIN register.
 */
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Bits 16-23: max redirection entry index; bits 0-7: version. */
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		/* Arbitration ID reads back the same 4-bit ID here. */
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			/* Registers 0x10.. are redirtbl halves, two per pin. */
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			ASSERT(redir_index < IOAPIC_NUM_PINS);

			redir_content = ioapic->redirtbl[redir_index].bits;
			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

/*
 * Try to deliver the interrupt on pin @idx if it is not masked.
 * For level-triggered pins a successful injection sets remote_irr,
 * which suppresses re-delivery until the guest EOIs the vector.
 * Returns the ioapic_deliver() result, or -1 if the pin was masked.
 */
static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
{
	union kvm_ioapic_redirect_entry *pent;
	int injected = -1;

	pent = &ioapic->redirtbl[idx];

	if (!pent->fields.mask) {
		injected = ioapic_deliver(ioapic, idx);
		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
			pent->fields.remote_irr = 1;
	}

	return injected;
}

/*
 * Write @val to the register selected by ioregsel.  Redirection-table
 * writes fire mask notifiers on mask transitions and, for a pending
 * level-triggered interrupt, immediately re-service the pin.
 */
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		/* Arbitration ID is read-only in this model. */
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		if (ioapic->ioregsel & 1) {
			/* High dword of the 64-bit entry. */
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			/* Low dword; clears remote_irr as a side effect. */
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
			e->fields.remote_irr = 0;
		}
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			/* Pending level IRQ: retry delivery with new entry. */
			ioapic_service(ioapic, index);
		break;
	}
}

/*
 * Translate redirection entry @irq into a struct kvm_lapic_irq and hand
 * it to the LAPIC delivery code.  Returns kvm_irq_delivery_to_apic()'s
 * result (number of CPUs the interrupt was delivered to).
 */
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	/*
	 * NOTE(review): the 3-bit delivery_mode field is shifted left by 8 —
	 * presumably to match the APIC_DM_* bit positions expected by
	 * kvm_irq_delivery_to_apic(); confirm against lapic.c.
	 */
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

#ifdef CONFIG_X86
	/* Always deliver the PIT interrupt (pin 0) to vcpu 0. */
	if (irq == 0) {
		irqe.dest_mode = 0; /* Physical mode. */
		/* need to read apic_id from the APIC register since
		 * it can be rewritten */
		irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
	}
#endif
	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}

/*
 * Assert or deassert IOAPIC pin @irq.  Edge interrupts fire on a 0->1
 * IRR transition; level interrupts fire while remote_irr is clear.
 * Returns the delivery count, 0 if the interrupt was coalesced, or 1 if
 * @irq was out of range (ret's initial value).
 *
 * NOTE(review): old_irr is read before the lock is taken — verify that
 * all callers serialize, otherwise the edge-coalescing test races.
 */
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
{
	u32 old_irr = ioapic->irr;
	u32 mask = 1 << irq;
	union kvm_ioapic_redirect_entry entry;
	int ret = 1;

	mutex_lock(&ioapic->lock);
	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
		entry = ioapic->redirtbl[irq];
		/* Polarity bit inverts the meaning of @level. */
		level ^= entry.fields.polarity;
		if (!level)
			ioapic->irr &= ~mask;
		else {
			int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
			ioapic->irr |= mask;
			if ((edge && old_irr != ioapic->irr) ||
			    (!edge && !entry.fields.remote_irr))
				ret = ioapic_service(ioapic, irq);
			else
				ret = 0; /* report coalesced interrupt */
		}
		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	}
	mutex_unlock(&ioapic->lock);

	return ret;
}

/*
 * EOI handling: for every pin whose entry carries @vector, run the ack
 * notifiers and, for level-triggered pins, clear remote_irr and
 * re-deliver if the line is still asserted.  Caller holds ioapic->lock.
 */
static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
				    int trigger_mode)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		/*
		 * We are dropping lock while calling ack notifiers because ack
		 * notifier callbacks for assigned devices call into IOAPIC
		 * recursively. Since remote_irr is cleared only after call
		 * to notifiers if the same vector will be delivered while lock
		 * is dropped it will be put into irr and will be delivered
		 * after ack notifier returns.
		 */
		mutex_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		mutex_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
			ioapic_service(ioapic, i);
	}
}

/* Locked wrapper around __kvm_ioapic_update_eoi(). */
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	mutex_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
	mutex_unlock(&ioapic->lock);
}

/* Recover the kvm_ioapic from its embedded kvm_io_device. */
static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

/* Is @addr inside this IOAPIC's MMIO window? */
static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

/*
 * MMIO read handler registered on the KVM mmio bus.  Returns -EOPNOTSUPP
 * when the address is outside our window (so the bus tries the next
 * device), 0 otherwise with the result copied into @val.
 */
static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
			    void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	/* Reduce to the register offset within the IOAPIC page. */
	addr &= 0xff;
	mutex_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	mutex_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		/* Zero-extend the 32-bit register into a 64-bit read. */
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

/*
 * MMIO write handler.  Only 4- and 8-byte accesses are honoured (an
 * 8-byte write only uses the low 32 bits); others are logged and
 * ignored.  Returns -EOPNOTSUPP for addresses outside our window.
 */
static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
			     const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void*)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	if (len == 4 || len == 8)
		data = *(u32 *) val;
	else {
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	mutex_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data;
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;
#ifdef CONFIG_IA64
	case IOAPIC_REG_EOI:
		/* ia64 EOIs via a dedicated IOAPIC register. */
		__kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
		break;
#endif

	default:
		break;
	}
	mutex_unlock(&ioapic->lock);
	return 0;
}

/* Reset to power-on state: all pins masked, default base, IRR clear. */
void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->id = 0;
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};

/*
 * Allocate, reset and register the per-VM IOAPIC on the mmio bus.
 * Returns 0 on success or a negative errno; frees the ioapic on
 * registration failure (kvm->arch.vioapic is then left dangling —
 * NOTE(review): callers appear to treat init failure as fatal for the
 * VM; confirm nothing dereferences arch.vioapic afterwards).
 */
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	mutex_init(&ioapic->lock);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev);
	if (ret < 0)
		kfree(ioapic);

	return ret;
}

/*
 * Snapshot the IOAPIC into userspace-visible @state (KVM_GET_IRQCHIP).
 * Relies on struct kvm_ioapic beginning with the same layout as
 * struct kvm_ioapic_state.
 */
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	mutex_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	mutex_unlock(&ioapic->lock);
	return 0;
}

/* Restore IOAPIC state from @state (KVM_SET_IRQCHIP); see kvm_get_ioapic. */
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	mutex_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	mutex_unlock(&ioapic->lock);
	return 0;
}