Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'stable/for-linus-3.4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull xen updates from Konrad Rzeszutek Wilk:
"which has three neat features:

- PV multiconsole support, so that there can be hvc1, hvc2, etc; This
can be used in HVM and in PV mode.

- P-state and C-state power management driver that uploads said power
management data to the hypervisor. It also inhibits cpufreq
scaling drivers from loading so that only the hypervisor can make power
management decisions - fixing a weird perf bug.

There is one thing in the Kconfig that you won't like: "default y
if (X86_ACPI_CPUFREQ = y || X86_POWERNOW_K8 = y)" (note, that it
all depends on CONFIG_XEN which depends on CONFIG_PARAVIRT which by
default is off). I've a fix to convert that boolean expression
into "default m" which I am going to post after the cpufreq git
pull - as the two patches to make this work depend on a fix in Dave
Jones's tree.

- Function Level Reset (FLR) support in the Xen PCI backend.

Fixes:

- Kconfig dependencies for Xen PV keyboard and video
- Compile warnings and constify fixes
- Change over to use percpu_xxx instead of this_cpu_xxx"

Fix up trivial conflicts in drivers/tty/hvc/hvc_xen.c due to changes to
a removed commit.

* tag 'stable/for-linus-3.4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen kconfig: relax INPUT_XEN_KBDDEV_FRONTEND deps
xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.
xen: constify all instances of "struct attribute_group"
xen/xenbus: ignore console/0
hvc_xen: introduce HVC_XEN_FRONTEND
hvc_xen: implement multiconsole support
hvc_xen: support PV on HVM consoles
xenbus: don't free other end details too early
xen/enlighten: Expose MWAIT and MWAIT_LEAF if hypervisor OKs it.
xen/setup/pm/acpi: Remove the call to boot_option_idle_override.
xenbus: address compiler warnings
xen: use this_cpu_xxx replace percpu_xxx funcs
xen/pciback: Support pci_reset_function, aka FLR or D3 support.
pci: Introduce __pci_reset_function_locked to be used when holding device_lock.
xen: Utilize the restore_msi_irqs hook.

+1265 -81
+1
arch/ia64/include/asm/xen/interface.h
··· 77 77 DEFINE_GUEST_HANDLE(long); 78 78 DEFINE_GUEST_HANDLE(void); 79 79 DEFINE_GUEST_HANDLE(uint64_t); 80 + DEFINE_GUEST_HANDLE(uint32_t); 80 81 81 82 typedef unsigned long xen_pfn_t; 82 83 DEFINE_GUEST_HANDLE(xen_pfn_t);
+1
arch/x86/include/asm/xen/interface.h
··· 56 56 DEFINE_GUEST_HANDLE(long); 57 57 DEFINE_GUEST_HANDLE(void); 58 58 DEFINE_GUEST_HANDLE(uint64_t); 59 + DEFINE_GUEST_HANDLE(uint32_t); 59 60 #endif 60 61 61 62 #ifndef HYPERVISOR_VIRT_START
+27
arch/x86/pci/xen.c
··· 324 324 out: 325 325 return ret; 326 326 } 327 + 328 + static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq) 329 + { 330 + int ret = 0; 331 + 332 + if (pci_seg_supported) { 333 + struct physdev_pci_device restore_ext; 334 + 335 + restore_ext.seg = pci_domain_nr(dev->bus); 336 + restore_ext.bus = dev->bus->number; 337 + restore_ext.devfn = dev->devfn; 338 + ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext, 339 + &restore_ext); 340 + if (ret == -ENOSYS) 341 + pci_seg_supported = false; 342 + WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret); 343 + } 344 + if (!pci_seg_supported) { 345 + struct physdev_restore_msi restore; 346 + 347 + restore.bus = dev->bus->number; 348 + restore.devfn = dev->devfn; 349 + ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore); 350 + WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret); 351 + } 352 + } 327 353 #endif 328 354 329 355 static void xen_teardown_msi_irqs(struct pci_dev *dev) ··· 472 446 #ifdef CONFIG_PCI_MSI 473 447 x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; 474 448 x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 449 + x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; 475 450 #endif 476 451 xen_setup_acpi_sci(); 477 452 __acpi_register_gsi = acpi_register_gsi_xen;
+95 -4
arch/x86/xen/enlighten.c
··· 62 62 #include <asm/reboot.h> 63 63 #include <asm/stackprotector.h> 64 64 #include <asm/hypervisor.h> 65 + #include <asm/mwait.h> 66 + 67 + #ifdef CONFIG_ACPI 68 + #include <linux/acpi.h> 69 + #include <asm/acpi.h> 70 + #include <acpi/pdc_intel.h> 71 + #include <acpi/processor.h> 72 + #include <xen/interface/platform.h> 73 + #endif 65 74 66 75 #include "xen-ops.h" 67 76 #include "mmu.h" ··· 209 200 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; 210 201 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; 211 202 203 + static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask; 204 + static __read_mostly unsigned int cpuid_leaf5_ecx_val; 205 + static __read_mostly unsigned int cpuid_leaf5_edx_val; 206 + 212 207 static void xen_cpuid(unsigned int *ax, unsigned int *bx, 213 208 unsigned int *cx, unsigned int *dx) 214 209 { 215 210 unsigned maskebx = ~0; 216 211 unsigned maskecx = ~0; 217 212 unsigned maskedx = ~0; 218 - 213 + unsigned setecx = 0; 219 214 /* 220 215 * Mask out inconvenient features, to try and disable as many 221 216 * unsupported kernel subsystems as possible. ··· 227 214 switch (*ax) { 228 215 case 1: 229 216 maskecx = cpuid_leaf1_ecx_mask; 217 + setecx = cpuid_leaf1_ecx_set_mask; 230 218 maskedx = cpuid_leaf1_edx_mask; 231 219 break; 220 + 221 + case CPUID_MWAIT_LEAF: 222 + /* Synthesize the values.. 
*/ 223 + *ax = 0; 224 + *bx = 0; 225 + *cx = cpuid_leaf5_ecx_val; 226 + *dx = cpuid_leaf5_edx_val; 227 + return; 232 228 233 229 case 0xb: 234 230 /* Suppress extended topology stuff */ ··· 254 232 255 233 *bx &= maskebx; 256 234 *cx &= maskecx; 235 + *cx |= setecx; 257 236 *dx &= maskedx; 237 + 258 238 } 259 239 240 + static bool __init xen_check_mwait(void) 241 + { 242 + #ifdef CONFIG_ACPI 243 + struct xen_platform_op op = { 244 + .cmd = XENPF_set_processor_pminfo, 245 + .u.set_pminfo.id = -1, 246 + .u.set_pminfo.type = XEN_PM_PDC, 247 + }; 248 + uint32_t buf[3]; 249 + unsigned int ax, bx, cx, dx; 250 + unsigned int mwait_mask; 251 + 252 + /* We need to determine whether it is OK to expose the MWAIT 253 + * capability to the kernel to harvest deeper than C3 states from ACPI 254 + * _CST using the processor_harvest_xen.c module. For this to work, we 255 + * need to gather the MWAIT_LEAF values (which the cstate.c code 256 + * checks against). The hypervisor won't expose the MWAIT flag because 257 + * it would break backwards compatibility; so we will find out directly 258 + * from the hardware and hypercall. 259 + */ 260 + if (!xen_initial_domain()) 261 + return false; 262 + 263 + ax = 1; 264 + cx = 0; 265 + 266 + native_cpuid(&ax, &bx, &cx, &dx); 267 + 268 + mwait_mask = (1 << (X86_FEATURE_EST % 32)) | 269 + (1 << (X86_FEATURE_MWAIT % 32)); 270 + 271 + if ((cx & mwait_mask) != mwait_mask) 272 + return false; 273 + 274 + /* We need to emulate the MWAIT_LEAF and for that we need both 275 + * ecx and edx. The hypercall provides only partial information. 276 + */ 277 + 278 + ax = CPUID_MWAIT_LEAF; 279 + bx = 0; 280 + cx = 0; 281 + dx = 0; 282 + 283 + native_cpuid(&ax, &bx, &cx, &dx); 284 + 285 + /* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so, 286 + * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3. 
287 + */ 288 + buf[0] = ACPI_PDC_REVISION_ID; 289 + buf[1] = 1; 290 + buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP); 291 + 292 + set_xen_guest_handle(op.u.set_pminfo.pdc, buf); 293 + 294 + if ((HYPERVISOR_dom0_op(&op) == 0) && 295 + (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) { 296 + cpuid_leaf5_ecx_val = cx; 297 + cpuid_leaf5_edx_val = dx; 298 + } 299 + return true; 300 + #else 301 + return false; 302 + #endif 303 + } 260 304 static void __init xen_init_cpuid_mask(void) 261 305 { 262 306 unsigned int ax, bx, cx, dx; ··· 349 261 /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ 350 262 if ((cx & xsave_mask) != xsave_mask) 351 263 cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ 264 + 265 + if (xen_check_mwait()) 266 + cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); 352 267 } 353 268 354 269 static void xen_set_debugreg(int reg, unsigned long val) ··· 868 777 869 778 static unsigned long xen_read_cr0(void) 870 779 { 871 - unsigned long cr0 = percpu_read(xen_cr0_value); 780 + unsigned long cr0 = this_cpu_read(xen_cr0_value); 872 781 873 782 if (unlikely(cr0 == 0)) { 874 783 cr0 = native_read_cr0(); 875 - percpu_write(xen_cr0_value, cr0); 784 + this_cpu_write(xen_cr0_value, cr0); 876 785 } 877 786 878 787 return cr0; ··· 882 791 { 883 792 struct multicall_space mcs; 884 793 885 - percpu_write(xen_cr0_value, cr0); 794 + this_cpu_write(xen_cr0_value, cr0); 886 795 887 796 /* Only pay attention to cr0.TS; everything else is 888 797 ignored. */
+4 -4
arch/x86/xen/irq.c
··· 26 26 struct vcpu_info *vcpu; 27 27 unsigned long flags; 28 28 29 - vcpu = percpu_read(xen_vcpu); 29 + vcpu = this_cpu_read(xen_vcpu); 30 30 31 31 /* flag has opposite sense of mask */ 32 32 flags = !vcpu->evtchn_upcall_mask; ··· 50 50 make sure we're don't switch CPUs between getting the vcpu 51 51 pointer and updating the mask. */ 52 52 preempt_disable(); 53 - vcpu = percpu_read(xen_vcpu); 53 + vcpu = this_cpu_read(xen_vcpu); 54 54 vcpu->evtchn_upcall_mask = flags; 55 55 preempt_enable_no_resched(); 56 56 ··· 72 72 make sure we're don't switch CPUs between getting the vcpu 73 73 pointer and updating the mask. */ 74 74 preempt_disable(); 75 - percpu_read(xen_vcpu)->evtchn_upcall_mask = 1; 75 + this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1; 76 76 preempt_enable_no_resched(); 77 77 } 78 78 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable); ··· 86 86 the caller is confused and is trying to re-enable interrupts 87 87 on an indeterminate processor. */ 88 88 89 - vcpu = percpu_read(xen_vcpu); 89 + vcpu = this_cpu_read(xen_vcpu); 90 90 vcpu->evtchn_upcall_mask = 0; 91 91 92 92 /* Doesn't matter if we get preempted here, because any
+10 -10
arch/x86/xen/mmu.c
··· 1071 1071 struct mm_struct *mm = info; 1072 1072 struct mm_struct *active_mm; 1073 1073 1074 - active_mm = percpu_read(cpu_tlbstate.active_mm); 1074 + active_mm = this_cpu_read(cpu_tlbstate.active_mm); 1075 1075 1076 - if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK) 1076 + if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) 1077 1077 leave_mm(smp_processor_id()); 1078 1078 1079 1079 /* If this cpu still has a stale cr3 reference, then make sure 1080 1080 it has been flushed. */ 1081 - if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) 1081 + if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) 1082 1082 load_cr3(swapper_pg_dir); 1083 1083 } 1084 1084 ··· 1185 1185 1186 1186 static void xen_write_cr2(unsigned long cr2) 1187 1187 { 1188 - percpu_read(xen_vcpu)->arch.cr2 = cr2; 1188 + this_cpu_read(xen_vcpu)->arch.cr2 = cr2; 1189 1189 } 1190 1190 1191 1191 static unsigned long xen_read_cr2(void) 1192 1192 { 1193 - return percpu_read(xen_vcpu)->arch.cr2; 1193 + return this_cpu_read(xen_vcpu)->arch.cr2; 1194 1194 } 1195 1195 1196 1196 unsigned long xen_read_cr2_direct(void) 1197 1197 { 1198 - return percpu_read(xen_vcpu_info.arch.cr2); 1198 + return this_cpu_read(xen_vcpu_info.arch.cr2); 1199 1199 } 1200 1200 1201 1201 static void xen_flush_tlb(void) ··· 1278 1278 1279 1279 static unsigned long xen_read_cr3(void) 1280 1280 { 1281 - return percpu_read(xen_cr3); 1281 + return this_cpu_read(xen_cr3); 1282 1282 } 1283 1283 1284 1284 static void set_current_cr3(void *v) 1285 1285 { 1286 - percpu_write(xen_current_cr3, (unsigned long)v); 1286 + this_cpu_write(xen_current_cr3, (unsigned long)v); 1287 1287 } 1288 1288 1289 1289 static void __xen_write_cr3(bool kernel, unsigned long cr3) ··· 1306 1306 xen_extend_mmuext_op(&op); 1307 1307 1308 1308 if (kernel) { 1309 - percpu_write(xen_cr3, cr3); 1309 + this_cpu_write(xen_cr3, cr3); 1310 1310 1311 1311 /* Update xen_current_cr3 once the batch has actually 1312 1312 been submitted. 
*/ ··· 1322 1322 1323 1323 /* Update while interrupts are disabled, so its atomic with 1324 1324 respect to ipis */ 1325 - percpu_write(xen_cr3, cr3); 1325 + this_cpu_write(xen_cr3, cr3); 1326 1326 1327 1327 __xen_write_cr3(true, cr3); 1328 1328
+1 -1
arch/x86/xen/multicalls.h
··· 47 47 xen_mc_flush(); 48 48 49 49 /* restore flags saved in xen_mc_batch */ 50 - local_irq_restore(percpu_read(xen_mc_irq_flags)); 50 + local_irq_restore(this_cpu_read(xen_mc_irq_flags)); 51 51 } 52 52 53 53 /* Set up a callback to be called when the current batch is flushed */
-1
arch/x86/xen/setup.c
··· 420 420 boot_cpu_data.hlt_works_ok = 1; 421 421 #endif 422 422 disable_cpuidle(); 423 - boot_option_idle_override = IDLE_HALT; 424 423 WARN_ON(set_pm_idle_to_default()); 425 424 fiddle_vdso(); 426 425 }
+1 -1
arch/x86/xen/smp.c
··· 76 76 xen_setup_cpu_clockevents(); 77 77 78 78 set_cpu_online(cpu, true); 79 - percpu_write(cpu_state, CPU_ONLINE); 79 + this_cpu_write(cpu_state, CPU_ONLINE); 80 80 wmb(); 81 81 82 82 /* We can take interrupts now: we're officially "up". */
+1 -1
drivers/input/misc/Kconfig
··· 558 558 559 559 config INPUT_XEN_KBDDEV_FRONTEND 560 560 tristate "Xen virtual keyboard and mouse support" 561 - depends on XEN_FBDEV_FRONTEND 561 + depends on XEN 562 562 default y 563 563 select XEN_XENBUS_FRONTEND 564 564 help
+25
drivers/pci/pci.c
··· 3163 3163 EXPORT_SYMBOL_GPL(__pci_reset_function); 3164 3164 3165 3165 /** 3166 + * __pci_reset_function_locked - reset a PCI device function while holding 3167 + * the @dev mutex lock. 3168 + * @dev: PCI device to reset 3169 + * 3170 + * Some devices allow an individual function to be reset without affecting 3171 + * other functions in the same device. The PCI device must be responsive 3172 + * to PCI config space in order to use this function. 3173 + * 3174 + * The device function is presumed to be unused and the caller is holding 3175 + * the device mutex lock when this function is called. 3176 + * Resetting the device will make the contents of PCI configuration space 3177 + * random, so any caller of this must be prepared to reinitialise the 3178 + * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 3179 + * etc. 3180 + * 3181 + * Returns 0 if the device function was successfully reset or negative if the 3182 + * device doesn't support resetting a single function. 3183 + */ 3184 + int __pci_reset_function_locked(struct pci_dev *dev) 3185 + { 3186 + return pci_dev_reset(dev, 1); 3187 + } 3188 + EXPORT_SYMBOL_GPL(__pci_reset_function_locked); 3189 + 3190 + /** 3166 3191 * pci_probe_reset_function - check whether the device can be safely reset 3167 3192 * @dev: PCI device to reset 3168 3193 *
+8
drivers/tty/hvc/Kconfig
··· 66 66 help 67 67 Xen virtual console device driver 68 68 69 + config HVC_XEN_FRONTEND 70 + bool "Xen Hypervisor Multiple Consoles support" 71 + depends on HVC_XEN 72 + select XEN_XENBUS_FRONTEND 73 + default y 74 + help 75 + Xen driver for secondary virtual consoles 76 + 69 77 config HVC_UDBG 70 78 bool "udbg based fake hypervisor console" 71 79 depends on PPC && EXPERIMENTAL
+423 -44
drivers/tty/hvc/hvc_xen.c
··· 23 23 #include <linux/err.h> 24 24 #include <linux/init.h> 25 25 #include <linux/types.h> 26 + #include <linux/list.h> 26 27 28 + #include <asm/io.h> 27 29 #include <asm/xen/hypervisor.h> 28 30 29 31 #include <xen/xen.h> 32 + #include <xen/interface/xen.h> 33 + #include <xen/hvm.h> 34 + #include <xen/grant_table.h> 30 35 #include <xen/page.h> 31 36 #include <xen/events.h> 32 37 #include <xen/interface/io/console.h> 33 38 #include <xen/hvc-console.h> 39 + #include <xen/xenbus.h> 34 40 35 41 #include "hvc_console.h" 36 42 37 43 #define HVC_COOKIE 0x58656e /* "Xen" in hex */ 38 44 39 - static struct hvc_struct *hvc; 40 - static int xencons_irq; 45 + struct xencons_info { 46 + struct list_head list; 47 + struct xenbus_device *xbdev; 48 + struct xencons_interface *intf; 49 + unsigned int evtchn; 50 + struct hvc_struct *hvc; 51 + int irq; 52 + int vtermno; 53 + grant_ref_t gntref; 54 + }; 55 + 56 + static LIST_HEAD(xenconsoles); 57 + static DEFINE_SPINLOCK(xencons_lock); 41 58 42 59 /* ------------------------------------------------------------------ */ 43 60 44 - static unsigned long console_pfn = ~0ul; 45 - 46 - static inline struct xencons_interface *xencons_interface(void) 61 + static struct xencons_info *vtermno_to_xencons(int vtermno) 47 62 { 48 - if (console_pfn == ~0ul) 49 - return mfn_to_virt(xen_start_info->console.domU.mfn); 50 - else 51 - return __va(console_pfn << PAGE_SHIFT); 63 + struct xencons_info *entry, *n, *ret = NULL; 64 + 65 + if (list_empty(&xenconsoles)) 66 + return NULL; 67 + 68 + list_for_each_entry_safe(entry, n, &xenconsoles, list) { 69 + if (entry->vtermno == vtermno) { 70 + ret = entry; 71 + break; 72 + } 73 + } 74 + 75 + return ret; 52 76 } 53 77 54 - static inline void notify_daemon(void) 78 + static inline int xenbus_devid_to_vtermno(int devid) 79 + { 80 + return devid + HVC_COOKIE; 81 + } 82 + 83 + static inline void notify_daemon(struct xencons_info *cons) 55 84 { 56 85 /* Use evtchn: this is called early, before irq is set up. 
*/ 57 - notify_remote_via_evtchn(xen_start_info->console.domU.evtchn); 86 + notify_remote_via_evtchn(cons->evtchn); 58 87 } 59 88 60 - static int __write_console(const char *data, int len) 89 + static int __write_console(struct xencons_info *xencons, 90 + const char *data, int len) 61 91 { 62 - struct xencons_interface *intf = xencons_interface(); 63 92 XENCONS_RING_IDX cons, prod; 93 + struct xencons_interface *intf = xencons->intf; 64 94 int sent = 0; 65 95 66 96 cons = intf->out_cons; ··· 105 75 intf->out_prod = prod; 106 76 107 77 if (sent) 108 - notify_daemon(); 78 + notify_daemon(xencons); 109 79 return sent; 110 80 } 111 81 112 82 static int domU_write_console(uint32_t vtermno, const char *data, int len) 113 83 { 114 84 int ret = len; 85 + struct xencons_info *cons = vtermno_to_xencons(vtermno); 86 + if (cons == NULL) 87 + return -EINVAL; 115 88 116 89 /* 117 90 * Make sure the whole buffer is emitted, polling if ··· 123 90 * kernel is crippled. 124 91 */ 125 92 while (len) { 126 - int sent = __write_console(data, len); 93 + int sent = __write_console(cons, data, len); 127 94 128 95 data += sent; 129 96 len -= sent; ··· 137 104 138 105 static int domU_read_console(uint32_t vtermno, char *buf, int len) 139 106 { 140 - struct xencons_interface *intf = xencons_interface(); 107 + struct xencons_interface *intf; 141 108 XENCONS_RING_IDX cons, prod; 142 109 int recv = 0; 110 + struct xencons_info *xencons = vtermno_to_xencons(vtermno); 111 + if (xencons == NULL) 112 + return -EINVAL; 113 + intf = xencons->intf; 143 114 144 115 cons = intf->in_cons; 145 116 prod = intf->in_prod; ··· 156 119 mb(); /* read ring before consuming */ 157 120 intf->in_cons = cons; 158 121 159 - notify_daemon(); 122 + notify_daemon(xencons); 160 123 return recv; 161 124 } 162 125 ··· 194 157 .notifier_hangup = notifier_hangup_irq, 195 158 }; 196 159 197 - static int __init xen_hvc_init(void) 160 + static int xen_hvm_console_init(void) 198 161 { 199 - struct hvc_struct *hp; 200 - struct 
hv_ops *ops; 162 + int r; 163 + uint64_t v = 0; 164 + unsigned long mfn; 165 + struct xencons_info *info; 166 + 167 + if (!xen_hvm_domain()) 168 + return -ENODEV; 169 + 170 + info = vtermno_to_xencons(HVC_COOKIE); 171 + if (!info) { 172 + info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO); 173 + if (!info) 174 + return -ENOMEM; 175 + } 176 + 177 + /* already configured */ 178 + if (info->intf != NULL) 179 + return 0; 180 + 181 + r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); 182 + if (r < 0) { 183 + kfree(info); 184 + return -ENODEV; 185 + } 186 + info->evtchn = v; 187 + hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); 188 + if (r < 0) { 189 + kfree(info); 190 + return -ENODEV; 191 + } 192 + mfn = v; 193 + info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); 194 + if (info->intf == NULL) { 195 + kfree(info); 196 + return -ENODEV; 197 + } 198 + info->vtermno = HVC_COOKIE; 199 + 200 + spin_lock(&xencons_lock); 201 + list_add_tail(&info->list, &xenconsoles); 202 + spin_unlock(&xencons_lock); 203 + 204 + return 0; 205 + } 206 + 207 + static int xen_pv_console_init(void) 208 + { 209 + struct xencons_info *info; 201 210 202 211 if (!xen_pv_domain()) 203 212 return -ENODEV; 204 213 205 - if (xen_initial_domain()) { 206 - ops = &dom0_hvc_ops; 207 - xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0); 208 - } else { 209 - if (!xen_start_info->console.domU.evtchn) 210 - return -ENODEV; 214 + if (!xen_start_info->console.domU.evtchn) 215 + return -ENODEV; 211 216 212 - ops = &domU_hvc_ops; 213 - xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn); 217 + info = vtermno_to_xencons(HVC_COOKIE); 218 + if (!info) { 219 + info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO); 220 + if (!info) 221 + return -ENOMEM; 214 222 } 215 - if (xencons_irq < 0) 216 - xencons_irq = 0; 217 - else 218 - irq_set_noprobe(xencons_irq); 219 223 220 - hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); 221 - if (IS_ERR(hp)) 222 - return PTR_ERR(hp); 224 
+ /* already configured */ 225 + if (info->intf != NULL) 226 + return 0; 223 227 224 - hvc = hp; 228 + info->evtchn = xen_start_info->console.domU.evtchn; 229 + info->intf = mfn_to_virt(xen_start_info->console.domU.mfn); 230 + info->vtermno = HVC_COOKIE; 225 231 226 - console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn); 232 + spin_lock(&xencons_lock); 233 + list_add_tail(&info->list, &xenconsoles); 234 + spin_unlock(&xencons_lock); 235 + 236 + return 0; 237 + } 238 + 239 + static int xen_initial_domain_console_init(void) 240 + { 241 + struct xencons_info *info; 242 + 243 + if (!xen_initial_domain()) 244 + return -ENODEV; 245 + 246 + info = vtermno_to_xencons(HVC_COOKIE); 247 + if (!info) { 248 + info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO); 249 + if (!info) 250 + return -ENOMEM; 251 + } 252 + 253 + info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0); 254 + info->vtermno = HVC_COOKIE; 255 + 256 + spin_lock(&xencons_lock); 257 + list_add_tail(&info->list, &xenconsoles); 258 + spin_unlock(&xencons_lock); 227 259 228 260 return 0; 229 261 } 230 262 231 263 void xen_console_resume(void) 232 264 { 233 - if (xencons_irq) 234 - rebind_evtchn_irq(xen_start_info->console.domU.evtchn, xencons_irq); 265 + struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); 266 + if (info != NULL && info->irq) 267 + rebind_evtchn_irq(info->evtchn, info->irq); 268 + } 269 + 270 + static void xencons_disconnect_backend(struct xencons_info *info) 271 + { 272 + if (info->irq > 0) 273 + unbind_from_irqhandler(info->irq, NULL); 274 + info->irq = 0; 275 + if (info->evtchn > 0) 276 + xenbus_free_evtchn(info->xbdev, info->evtchn); 277 + info->evtchn = 0; 278 + if (info->gntref > 0) 279 + gnttab_free_grant_references(info->gntref); 280 + info->gntref = 0; 281 + if (info->hvc != NULL) 282 + hvc_remove(info->hvc); 283 + info->hvc = NULL; 284 + } 285 + 286 + static void xencons_free(struct xencons_info *info) 287 + { 288 + free_page((unsigned long)info->intf); 289 + 
info->intf = NULL; 290 + info->vtermno = 0; 291 + kfree(info); 292 + } 293 + 294 + static int xen_console_remove(struct xencons_info *info) 295 + { 296 + xencons_disconnect_backend(info); 297 + spin_lock(&xencons_lock); 298 + list_del(&info->list); 299 + spin_unlock(&xencons_lock); 300 + if (info->xbdev != NULL) 301 + xencons_free(info); 302 + else { 303 + if (xen_hvm_domain()) 304 + iounmap(info->intf); 305 + kfree(info); 306 + } 307 + return 0; 308 + } 309 + 310 + #ifdef CONFIG_HVC_XEN_FRONTEND 311 + static struct xenbus_driver xencons_driver; 312 + 313 + static int xencons_remove(struct xenbus_device *dev) 314 + { 315 + return xen_console_remove(dev_get_drvdata(&dev->dev)); 316 + } 317 + 318 + static int xencons_connect_backend(struct xenbus_device *dev, 319 + struct xencons_info *info) 320 + { 321 + int ret, evtchn, devid, ref, irq; 322 + struct xenbus_transaction xbt; 323 + grant_ref_t gref_head; 324 + unsigned long mfn; 325 + 326 + ret = xenbus_alloc_evtchn(dev, &evtchn); 327 + if (ret) 328 + return ret; 329 + info->evtchn = evtchn; 330 + irq = bind_evtchn_to_irq(evtchn); 331 + if (irq < 0) 332 + return irq; 333 + info->irq = irq; 334 + devid = dev->nodename[strlen(dev->nodename) - 1] - '0'; 335 + info->hvc = hvc_alloc(xenbus_devid_to_vtermno(devid), 336 + irq, &domU_hvc_ops, 256); 337 + if (IS_ERR(info->hvc)) 338 + return PTR_ERR(info->hvc); 339 + if (xen_pv_domain()) 340 + mfn = virt_to_mfn(info->intf); 341 + else 342 + mfn = __pa(info->intf) >> PAGE_SHIFT; 343 + ret = gnttab_alloc_grant_references(1, &gref_head); 344 + if (ret < 0) 345 + return ret; 346 + info->gntref = gref_head; 347 + ref = gnttab_claim_grant_reference(&gref_head); 348 + if (ref < 0) 349 + return ref; 350 + gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id, 351 + mfn, 0); 352 + 353 + again: 354 + ret = xenbus_transaction_start(&xbt); 355 + if (ret) { 356 + xenbus_dev_fatal(dev, ret, "starting transaction"); 357 + return ret; 358 + } 359 + ret = xenbus_printf(xbt, 
dev->nodename, "ring-ref", "%d", ref); 360 + if (ret) 361 + goto error_xenbus; 362 + ret = xenbus_printf(xbt, dev->nodename, "port", "%u", 363 + evtchn); 364 + if (ret) 365 + goto error_xenbus; 366 + ret = xenbus_printf(xbt, dev->nodename, "type", "ioemu"); 367 + if (ret) 368 + goto error_xenbus; 369 + ret = xenbus_transaction_end(xbt, 0); 370 + if (ret) { 371 + if (ret == -EAGAIN) 372 + goto again; 373 + xenbus_dev_fatal(dev, ret, "completing transaction"); 374 + return ret; 375 + } 376 + 377 + xenbus_switch_state(dev, XenbusStateInitialised); 378 + return 0; 379 + 380 + error_xenbus: 381 + xenbus_transaction_end(xbt, 1); 382 + xenbus_dev_fatal(dev, ret, "writing xenstore"); 383 + return ret; 384 + } 385 + 386 + static int __devinit xencons_probe(struct xenbus_device *dev, 387 + const struct xenbus_device_id *id) 388 + { 389 + int ret, devid; 390 + struct xencons_info *info; 391 + 392 + devid = dev->nodename[strlen(dev->nodename) - 1] - '0'; 393 + if (devid == 0) 394 + return -ENODEV; 395 + 396 + info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO); 397 + if (!info) 398 + goto error_nomem; 399 + dev_set_drvdata(&dev->dev, info); 400 + info->xbdev = dev; 401 + info->vtermno = xenbus_devid_to_vtermno(devid); 402 + info->intf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); 403 + if (!info->intf) 404 + goto error_nomem; 405 + 406 + ret = xencons_connect_backend(dev, info); 407 + if (ret < 0) 408 + goto error; 409 + spin_lock(&xencons_lock); 410 + list_add_tail(&info->list, &xenconsoles); 411 + spin_unlock(&xencons_lock); 412 + 413 + return 0; 414 + 415 + error_nomem: 416 + ret = -ENOMEM; 417 + xenbus_dev_fatal(dev, ret, "allocating device memory"); 418 + error: 419 + xencons_disconnect_backend(info); 420 + xencons_free(info); 421 + return ret; 422 + } 423 + 424 + static int xencons_resume(struct xenbus_device *dev) 425 + { 426 + struct xencons_info *info = dev_get_drvdata(&dev->dev); 427 + 428 + xencons_disconnect_backend(info); 429 + 
memset(info->intf, 0, PAGE_SIZE); 430 + return xencons_connect_backend(dev, info); 431 + } 432 + 433 + static void xencons_backend_changed(struct xenbus_device *dev, 434 + enum xenbus_state backend_state) 435 + { 436 + switch (backend_state) { 437 + case XenbusStateReconfiguring: 438 + case XenbusStateReconfigured: 439 + case XenbusStateInitialising: 440 + case XenbusStateInitialised: 441 + case XenbusStateUnknown: 442 + case XenbusStateClosed: 443 + break; 444 + 445 + case XenbusStateInitWait: 446 + break; 447 + 448 + case XenbusStateConnected: 449 + xenbus_switch_state(dev, XenbusStateConnected); 450 + break; 451 + 452 + case XenbusStateClosing: 453 + xenbus_frontend_closed(dev); 454 + break; 455 + } 456 + } 457 + 458 + static const struct xenbus_device_id xencons_ids[] = { 459 + { "console" }, 460 + { "" } 461 + }; 462 + 463 + 464 + static DEFINE_XENBUS_DRIVER(xencons, "xenconsole", 465 + .probe = xencons_probe, 466 + .remove = xencons_remove, 467 + .resume = xencons_resume, 468 + .otherend_changed = xencons_backend_changed, 469 + ); 470 + #endif /* CONFIG_HVC_XEN_FRONTEND */ 471 + 472 + static int __init xen_hvc_init(void) 473 + { 474 + int r; 475 + struct xencons_info *info; 476 + const struct hv_ops *ops; 477 + 478 + if (!xen_domain()) 479 + return -ENODEV; 480 + 481 + if (xen_initial_domain()) { 482 + ops = &dom0_hvc_ops; 483 + r = xen_initial_domain_console_init(); 484 + if (r < 0) 485 + return r; 486 + info = vtermno_to_xencons(HVC_COOKIE); 487 + } else { 488 + ops = &domU_hvc_ops; 489 + if (xen_hvm_domain()) 490 + r = xen_hvm_console_init(); 491 + else 492 + r = xen_pv_console_init(); 493 + if (r < 0) 494 + return r; 495 + 496 + info = vtermno_to_xencons(HVC_COOKIE); 497 + info->irq = bind_evtchn_to_irq(info->evtchn); 498 + } 499 + if (info->irq < 0) 500 + info->irq = 0; /* NO_IRQ */ 501 + else 502 + irq_set_noprobe(info->irq); 503 + 504 + info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256); 505 + if (IS_ERR(info->hvc)) { 506 + r = PTR_ERR(info->hvc); 
507 + spin_lock(&xencons_lock); 508 + list_del(&info->list); 509 + spin_unlock(&xencons_lock); 510 + if (info->irq) 511 + unbind_from_irqhandler(info->irq, NULL); 512 + kfree(info); 513 + return r; 514 + } 515 + 516 + r = 0; 517 + #ifdef CONFIG_HVC_XEN_FRONTEND 518 + r = xenbus_register_frontend(&xencons_driver); 519 + #endif 520 + return r; 235 521 } 236 522 237 523 static void __exit xen_hvc_fini(void) 238 524 { 239 - if (hvc) 240 - hvc_remove(hvc); 525 + struct xencons_info *entry, *next; 526 + 527 + if (list_empty(&xenconsoles)) 528 + return; 529 + 530 + list_for_each_entry_safe(entry, next, &xenconsoles, list) { 531 + xen_console_remove(entry); 532 + } 241 533 } 242 534 243 535 static int xen_cons_init(void) 244 536 { 245 - struct hv_ops *ops; 537 + const struct hv_ops *ops; 246 538 247 - if (!xen_pv_domain()) 539 + if (!xen_domain()) 248 540 return 0; 249 541 250 542 if (xen_initial_domain()) 251 543 ops = &dom0_hvc_ops; 252 - else 544 + else { 545 + int r; 253 546 ops = &domU_hvc_ops; 547 + 548 + if (xen_hvm_domain()) 549 + r = xen_hvm_console_init(); 550 + else 551 + r = xen_pv_console_init(); 552 + if (r < 0) 553 + return r; 554 + } 254 555 255 556 hvc_instantiate(HVC_COOKIE, 0, ops); 256 557 return 0; 257 558 } 559 + 258 560 259 561 module_init(xen_hvc_init); 260 562 module_exit(xen_hvc_fini); ··· 605 229 { 606 230 unsigned int linelen, off = 0; 607 231 const char *pos; 232 + 233 + if (!xen_pv_domain()) 234 + return; 608 235 609 236 dom0_write_console(0, string, len); 610 237
+1
drivers/video/Kconfig
··· 2269 2269 select FB_SYS_IMAGEBLIT 2270 2270 select FB_SYS_FOPS 2271 2271 select FB_DEFERRED_IO 2272 + select INPUT_XEN_KBDDEV_FRONTEND 2272 2273 select XEN_XENBUS_FRONTEND 2273 2274 default y 2274 2275 help
+17
drivers/xen/Kconfig
··· 178 178 depends on XEN 179 179 default m 180 180 181 + config XEN_ACPI_PROCESSOR 182 + tristate "Xen ACPI processor" 183 + depends on XEN && X86 && ACPI_PROCESSOR 184 + default y if (X86_ACPI_CPUFREQ = y || X86_POWERNOW_K8 = y) 185 + default m if (X86_ACPI_CPUFREQ = m || X86_POWERNOW_K8 = m) 186 + help 187 + This ACPI processor uploads Power Management information to the Xen hypervisor. 188 + 189 + To do that the driver parses the Power Management data and uploads said 190 + information to the Xen hypervisor. Then the Xen hypervisor can select the 191 + proper Cx and Pxx states. It also registers itself as the SMM so that 192 + other drivers (such as ACPI cpufreq scaling driver) will not load. 193 + 194 + To compile this driver as a module, choose M here: the 195 + module will be called xen_acpi_processor. If you do not know what to choose, 196 + select M here. If the CPUFREQ drivers are built in, select Y here. 197 + 181 198 endmenu
+1 -1
drivers/xen/Makefile
··· 20 20 obj-$(CONFIG_XEN_DOM0) += pci.o 21 21 obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ 22 22 obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o 23 - 23 + obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o 24 24 xen-evtchn-y := evtchn.o 25 25 xen-gntdev-y := gntdev.o 26 26 xen-gntalloc-y := gntalloc.o
+3 -3
drivers/xen/sys-hypervisor.c
··· 97 97 NULL 98 98 }; 99 99 100 - static struct attribute_group version_group = { 100 + static const struct attribute_group version_group = { 101 101 .name = "version", 102 102 .attrs = version_attrs, 103 103 }; ··· 210 210 NULL 211 211 }; 212 212 213 - static struct attribute_group xen_compilation_group = { 213 + static const struct attribute_group xen_compilation_group = { 214 214 .name = "compilation", 215 215 .attrs = xen_compile_attrs, 216 216 }; ··· 340 340 NULL 341 341 }; 342 342 343 - static struct attribute_group xen_properties_group = { 343 + static const struct attribute_group xen_properties_group = { 344 344 .name = "properties", 345 345 .attrs = xen_properties_attrs, 346 346 };
+562
drivers/xen/xen-acpi-processor.c
··· 1 + /* 2 + * Copyright 2012 by Oracle Inc 3 + * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 4 + * 5 + * This code borrows ideas from https://lkml.org/lkml/2011/11/30/249 6 + * so many thanks go to Kevin Tian <kevin.tian@intel.com> 7 + * and Yu Ke <ke.yu@intel.com>. 8 + * 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms and conditions of the GNU General Public License, 11 + * version 2, as published by the Free Software Foundation. 12 + * 13 + * This program is distributed in the hope it will be useful, but WITHOUT 14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 + * more details. 17 + * 18 + */ 19 + 20 + #include <linux/cpumask.h> 21 + #include <linux/cpufreq.h> 22 + #include <linux/freezer.h> 23 + #include <linux/kernel.h> 24 + #include <linux/kthread.h> 25 + #include <linux/init.h> 26 + #include <linux/module.h> 27 + #include <linux/types.h> 28 + #include <acpi/acpi_bus.h> 29 + #include <acpi/acpi_drivers.h> 30 + #include <acpi/processor.h> 31 + 32 + #include <xen/interface/platform.h> 33 + #include <asm/xen/hypercall.h> 34 + 35 + #define DRV_NAME "xen-acpi-processor: " 36 + 37 + static int no_hypercall; 38 + MODULE_PARM_DESC(off, "Inhibit the hypercall."); 39 + module_param_named(off, no_hypercall, int, 0400); 40 + 41 + /* 42 + * Note: Do not convert the acpi_id* below to cpumask_var_t or use cpumask_bit 43 + * - as those shrink to nr_cpu_bits (which is dependent on possible_cpu), which 44 + * can be less than what we want to put in. Instead use the 'nr_acpi_bits' 45 + * which is dynamically computed based on the MADT or x2APIC table. 46 + */ 47 + static unsigned int nr_acpi_bits; 48 + /* Mutex to protect the acpi_ids_done - for CPU hotplug use. */ 49 + static DEFINE_MUTEX(acpi_ids_mutex); 50 + /* Which ACPI ID we have processed from 'struct acpi_processor'. 
*/ 51 + static unsigned long *acpi_ids_done; 52 + /* Which ACPI ID exist in the SSDT/DSDT processor definitions. */ 53 + static unsigned long __initdata *acpi_id_present; 54 + /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */ 55 + static unsigned long __initdata *acpi_id_cst_present; 56 + 57 + static int push_cxx_to_hypervisor(struct acpi_processor *_pr) 58 + { 59 + struct xen_platform_op op = { 60 + .cmd = XENPF_set_processor_pminfo, 61 + .interface_version = XENPF_INTERFACE_VERSION, 62 + .u.set_pminfo.id = _pr->acpi_id, 63 + .u.set_pminfo.type = XEN_PM_CX, 64 + }; 65 + struct xen_processor_cx *dst_cx, *dst_cx_states = NULL; 66 + struct acpi_processor_cx *cx; 67 + unsigned int i, ok; 68 + int ret = 0; 69 + 70 + dst_cx_states = kcalloc(_pr->power.count, 71 + sizeof(struct xen_processor_cx), GFP_KERNEL); 72 + if (!dst_cx_states) 73 + return -ENOMEM; 74 + 75 + for (ok = 0, i = 1; i <= _pr->power.count; i++) { 76 + cx = &_pr->power.states[i]; 77 + if (!cx->valid) 78 + continue; 79 + 80 + dst_cx = &(dst_cx_states[ok++]); 81 + 82 + dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO; 83 + if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { 84 + dst_cx->reg.bit_width = 8; 85 + dst_cx->reg.bit_offset = 0; 86 + dst_cx->reg.access_size = 1; 87 + } else { 88 + dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE; 89 + if (cx->entry_method == ACPI_CSTATE_FFH) { 90 + /* NATIVE_CSTATE_BEYOND_HALT */ 91 + dst_cx->reg.bit_offset = 2; 92 + dst_cx->reg.bit_width = 1; /* VENDOR_INTEL */ 93 + } 94 + dst_cx->reg.access_size = 0; 95 + } 96 + dst_cx->reg.address = cx->address; 97 + 98 + dst_cx->type = cx->type; 99 + dst_cx->latency = cx->latency; 100 + dst_cx->power = cx->power; 101 + 102 + dst_cx->dpcnt = 0; 103 + set_xen_guest_handle(dst_cx->dp, NULL); 104 + } 105 + if (!ok) { 106 + pr_debug(DRV_NAME "No _Cx for ACPI CPU %u\n", _pr->acpi_id); 107 + kfree(dst_cx_states); 108 + return -EINVAL; 109 + } 110 + op.u.set_pminfo.power.count = ok; 111 + 
op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control; 112 + op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check; 113 + op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst; 114 + op.u.set_pminfo.power.flags.power_setup_done = 115 + _pr->flags.power_setup_done; 116 + 117 + set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states); 118 + 119 + if (!no_hypercall) 120 + ret = HYPERVISOR_dom0_op(&op); 121 + 122 + if (!ret) { 123 + pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id); 124 + for (i = 1; i <= _pr->power.count; i++) { 125 + cx = &_pr->power.states[i]; 126 + if (!cx->valid) 127 + continue; 128 + pr_debug(" C%d: %s %d uS\n", 129 + cx->type, cx->desc, (u32)cx->latency); 130 + } 131 + } else 132 + pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n", 133 + ret, _pr->acpi_id); 134 + 135 + kfree(dst_cx_states); 136 + 137 + return ret; 138 + } 139 + static struct xen_processor_px * 140 + xen_copy_pss_data(struct acpi_processor *_pr, 141 + struct xen_processor_performance *dst_perf) 142 + { 143 + struct xen_processor_px *dst_states = NULL; 144 + unsigned int i; 145 + 146 + BUILD_BUG_ON(sizeof(struct xen_processor_px) != 147 + sizeof(struct acpi_processor_px)); 148 + 149 + dst_states = kcalloc(_pr->performance->state_count, 150 + sizeof(struct xen_processor_px), GFP_KERNEL); 151 + if (!dst_states) 152 + return ERR_PTR(-ENOMEM); 153 + 154 + dst_perf->state_count = _pr->performance->state_count; 155 + for (i = 0; i < _pr->performance->state_count; i++) { 156 + /* Fortunatly for us, they are both the same size */ 157 + memcpy(&(dst_states[i]), &(_pr->performance->states[i]), 158 + sizeof(struct acpi_processor_px)); 159 + } 160 + return dst_states; 161 + } 162 + static int xen_copy_psd_data(struct acpi_processor *_pr, 163 + struct xen_processor_performance *dst) 164 + { 165 + struct acpi_psd_package *pdomain; 166 + 167 + BUILD_BUG_ON(sizeof(struct xen_psd_package) != 168 + sizeof(struct acpi_psd_package)); 169 + 170 + /* 
This information is enumerated only if acpi_processor_preregister_performance 171 + * has been called. 172 + */ 173 + dst->shared_type = _pr->performance->shared_type; 174 + 175 + pdomain = &(_pr->performance->domain_info); 176 + 177 + /* 'acpi_processor_preregister_performance' does not parse if the 178 + * num_processors <= 1, but Xen still requires it. Do it manually here. 179 + */ 180 + if (pdomain->num_processors <= 1) { 181 + if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) 182 + dst->shared_type = CPUFREQ_SHARED_TYPE_ALL; 183 + else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) 184 + dst->shared_type = CPUFREQ_SHARED_TYPE_HW; 185 + else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) 186 + dst->shared_type = CPUFREQ_SHARED_TYPE_ANY; 187 + 188 + } 189 + memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package)); 190 + return 0; 191 + } 192 + static int xen_copy_pct_data(struct acpi_pct_register *pct, 193 + struct xen_pct_register *dst_pct) 194 + { 195 + /* It would be nice if you could just do 'memcpy(pct, dst_pct') but 196 + * sadly the Xen structure did not have the proper padding so the 197 + * descriptor field takes two (dst_pct) bytes instead of one (pct). 
198 + */ 199 + dst_pct->descriptor = pct->descriptor; 200 + dst_pct->length = pct->length; 201 + dst_pct->space_id = pct->space_id; 202 + dst_pct->bit_width = pct->bit_width; 203 + dst_pct->bit_offset = pct->bit_offset; 204 + dst_pct->reserved = pct->reserved; 205 + dst_pct->address = pct->address; 206 + return 0; 207 + } 208 + static int push_pxx_to_hypervisor(struct acpi_processor *_pr) 209 + { 210 + int ret = 0; 211 + struct xen_platform_op op = { 212 + .cmd = XENPF_set_processor_pminfo, 213 + .interface_version = XENPF_INTERFACE_VERSION, 214 + .u.set_pminfo.id = _pr->acpi_id, 215 + .u.set_pminfo.type = XEN_PM_PX, 216 + }; 217 + struct xen_processor_performance *dst_perf; 218 + struct xen_processor_px *dst_states = NULL; 219 + 220 + dst_perf = &op.u.set_pminfo.perf; 221 + 222 + dst_perf->platform_limit = _pr->performance_platform_limit; 223 + dst_perf->flags |= XEN_PX_PPC; 224 + xen_copy_pct_data(&(_pr->performance->control_register), 225 + &dst_perf->control_register); 226 + xen_copy_pct_data(&(_pr->performance->status_register), 227 + &dst_perf->status_register); 228 + dst_perf->flags |= XEN_PX_PCT; 229 + dst_states = xen_copy_pss_data(_pr, dst_perf); 230 + if (!IS_ERR_OR_NULL(dst_states)) { 231 + set_xen_guest_handle(dst_perf->states, dst_states); 232 + dst_perf->flags |= XEN_PX_PSS; 233 + } 234 + if (!xen_copy_psd_data(_pr, dst_perf)) 235 + dst_perf->flags |= XEN_PX_PSD; 236 + 237 + if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) { 238 + pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n", 239 + _pr->acpi_id, dst_perf->flags); 240 + ret = -ENODEV; 241 + goto err_free; 242 + } 243 + 244 + if (!no_hypercall) 245 + ret = HYPERVISOR_dom0_op(&op); 246 + 247 + if (!ret) { 248 + struct acpi_processor_performance *perf; 249 + unsigned int i; 250 + 251 + perf = _pr->performance; 252 + pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id); 253 + for (i = 0; i < perf->state_count; i++) { 254 + pr_debug(" %cP%d: %d 
MHz, %d mW, %d uS\n", 255 + (i == perf->state ? '*' : ' '), i, 256 + (u32) perf->states[i].core_frequency, 257 + (u32) perf->states[i].power, 258 + (u32) perf->states[i].transition_latency); 259 + } 260 + } else if (ret != -EINVAL) 261 + /* EINVAL means the ACPI ID is incorrect - meaning the ACPI 262 + * table is referencing a non-existing CPU - which can happen 263 + * with broken ACPI tables. */ 264 + pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n", 265 + ret, _pr->acpi_id); 266 + err_free: 267 + if (!IS_ERR_OR_NULL(dst_states)) 268 + kfree(dst_states); 269 + 270 + return ret; 271 + } 272 + static int upload_pm_data(struct acpi_processor *_pr) 273 + { 274 + int err = 0; 275 + 276 + mutex_lock(&acpi_ids_mutex); 277 + if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) { 278 + mutex_unlock(&acpi_ids_mutex); 279 + return -EBUSY; 280 + } 281 + if (_pr->flags.power) 282 + err = push_cxx_to_hypervisor(_pr); 283 + 284 + if (_pr->performance && _pr->performance->states) 285 + err |= push_pxx_to_hypervisor(_pr); 286 + 287 + mutex_unlock(&acpi_ids_mutex); 288 + return err; 289 + } 290 + static unsigned int __init get_max_acpi_id(void) 291 + { 292 + struct xenpf_pcpuinfo *info; 293 + struct xen_platform_op op = { 294 + .cmd = XENPF_get_cpuinfo, 295 + .interface_version = XENPF_INTERFACE_VERSION, 296 + }; 297 + int ret = 0; 298 + unsigned int i, last_cpu, max_acpi_id = 0; 299 + 300 + info = &op.u.pcpu_info; 301 + info->xen_cpuid = 0; 302 + 303 + ret = HYPERVISOR_dom0_op(&op); 304 + if (ret) 305 + return NR_CPUS; 306 + 307 + /* The max_present is the same irregardless of the xen_cpuid */ 308 + last_cpu = op.u.pcpu_info.max_present; 309 + for (i = 0; i <= last_cpu; i++) { 310 + info->xen_cpuid = i; 311 + ret = HYPERVISOR_dom0_op(&op); 312 + if (ret) 313 + continue; 314 + max_acpi_id = max(info->acpi_id, max_acpi_id); 315 + } 316 + max_acpi_id *= 2; /* Slack for CPU hotplug support. 
*/ 317 + pr_debug(DRV_NAME "Max ACPI ID: %u\n", max_acpi_id); 318 + return max_acpi_id; 319 + } 320 + /* 321 + * The read_acpi_id and check_acpi_ids are there to support the Xen 322 + * oddity of virtual CPUs != physical CPUs in the initial domain. 323 + * The user can supply 'xen_max_vcpus=X' on the Xen hypervisor line 324 + * which will bound the amount of CPUs the initial domain can see. 325 + * In general that is OK, except it plays havoc with any of the 326 + * for_each_[present|online]_cpu macros which are bounded to the virtual 327 + * CPU amount. 328 + */ 329 + static acpi_status __init 330 + read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) 331 + { 332 + u32 acpi_id; 333 + acpi_status status; 334 + acpi_object_type acpi_type; 335 + unsigned long long tmp; 336 + union acpi_object object = { 0 }; 337 + struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 338 + acpi_io_address pblk = 0; 339 + 340 + status = acpi_get_type(handle, &acpi_type); 341 + if (ACPI_FAILURE(status)) 342 + return AE_OK; 343 + 344 + switch (acpi_type) { 345 + case ACPI_TYPE_PROCESSOR: 346 + status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 347 + if (ACPI_FAILURE(status)) 348 + return AE_OK; 349 + acpi_id = object.processor.proc_id; 350 + pblk = object.processor.pblk_address; 351 + break; 352 + case ACPI_TYPE_DEVICE: 353 + status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp); 354 + if (ACPI_FAILURE(status)) 355 + return AE_OK; 356 + acpi_id = tmp; 357 + break; 358 + default: 359 + return AE_OK; 360 + } 361 + /* There are more ACPI Processor objects than in x2APIC or MADT. 362 + * This can happen with incorrect ACPI SSDT declarations. 
*/ 363 + if (acpi_id > nr_acpi_bits) { 364 + pr_debug(DRV_NAME "We only have %u, trying to set %u\n", 365 + nr_acpi_bits, acpi_id); 366 + return AE_OK; 367 + } 368 + /* OK, There is a ACPI Processor object */ 369 + __set_bit(acpi_id, acpi_id_present); 370 + 371 + pr_debug(DRV_NAME "ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id, 372 + (unsigned long)pblk); 373 + 374 + status = acpi_evaluate_object(handle, "_CST", NULL, &buffer); 375 + if (ACPI_FAILURE(status)) { 376 + if (!pblk) 377 + return AE_OK; 378 + } 379 + /* .. and it has a C-state */ 380 + __set_bit(acpi_id, acpi_id_cst_present); 381 + 382 + return AE_OK; 383 + } 384 + static int __init check_acpi_ids(struct acpi_processor *pr_backup) 385 + { 386 + 387 + if (!pr_backup) 388 + return -ENODEV; 389 + 390 + /* All online CPUs have been processed at this stage. Now verify 391 + * whether in fact "online CPUs" == physical CPUs. 392 + */ 393 + acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); 394 + if (!acpi_id_present) 395 + return -ENOMEM; 396 + 397 + acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); 398 + if (!acpi_id_cst_present) { 399 + kfree(acpi_id_present); 400 + return -ENOMEM; 401 + } 402 + 403 + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 404 + ACPI_UINT32_MAX, 405 + read_acpi_id, NULL, NULL, NULL); 406 + acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); 407 + 408 + if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { 409 + unsigned int i; 410 + for_each_set_bit(i, acpi_id_present, nr_acpi_bits) { 411 + pr_backup->acpi_id = i; 412 + /* Mask out C-states if there are no _CST or PBLK */ 413 + pr_backup->flags.power = test_bit(i, acpi_id_cst_present); 414 + (void)upload_pm_data(pr_backup); 415 + } 416 + } 417 + kfree(acpi_id_present); 418 + acpi_id_present = NULL; 419 + kfree(acpi_id_cst_present); 420 + acpi_id_cst_present = NULL; 421 + return 0; 422 + } 423 + static int __init check_prereq(void) 
424 + { 425 + struct cpuinfo_x86 *c = &cpu_data(0); 426 + 427 + if (!xen_initial_domain()) 428 + return -ENODEV; 429 + 430 + if (!acpi_gbl_FADT.smi_command) 431 + return -ENODEV; 432 + 433 + if (c->x86_vendor == X86_VENDOR_INTEL) { 434 + if (!cpu_has(c, X86_FEATURE_EST)) 435 + return -ENODEV; 436 + 437 + return 0; 438 + } 439 + if (c->x86_vendor == X86_VENDOR_AMD) { 440 + /* Copied from powernow-k8.h, can't include ../cpufreq/powernow 441 + * as we get compile warnings for the static functions. 442 + */ 443 + #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 444 + #define USE_HW_PSTATE 0x00000080 445 + u32 eax, ebx, ecx, edx; 446 + cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); 447 + if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE) 448 + return -ENODEV; 449 + return 0; 450 + } 451 + return -ENODEV; 452 + } 453 + /* acpi_perf_data is a pointer to percpu data. */ 454 + static struct acpi_processor_performance __percpu *acpi_perf_data; 455 + 456 + static void free_acpi_perf_data(void) 457 + { 458 + unsigned int i; 459 + 460 + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. 
*/ 461 + for_each_possible_cpu(i) 462 + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) 463 + ->shared_cpu_map); 464 + free_percpu(acpi_perf_data); 465 + } 466 + 467 + static int __init xen_acpi_processor_init(void) 468 + { 469 + struct acpi_processor *pr_backup = NULL; 470 + unsigned int i; 471 + int rc = check_prereq(); 472 + 473 + if (rc) 474 + return rc; 475 + 476 + nr_acpi_bits = get_max_acpi_id() + 1; 477 + acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); 478 + if (!acpi_ids_done) 479 + return -ENOMEM; 480 + 481 + acpi_perf_data = alloc_percpu(struct acpi_processor_performance); 482 + if (!acpi_perf_data) { 483 + pr_debug(DRV_NAME "Memory allocation error for acpi_perf_data.\n"); 484 + kfree(acpi_ids_done); 485 + return -ENOMEM; 486 + } 487 + for_each_possible_cpu(i) { 488 + if (!zalloc_cpumask_var_node( 489 + &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, 490 + GFP_KERNEL, cpu_to_node(i))) { 491 + rc = -ENOMEM; 492 + goto err_out; 493 + } 494 + } 495 + 496 + /* Do initialization in ACPI core. It is OK to fail here. 
*/ 497 + (void)acpi_processor_preregister_performance(acpi_perf_data); 498 + 499 + for_each_possible_cpu(i) { 500 + struct acpi_processor_performance *perf; 501 + 502 + perf = per_cpu_ptr(acpi_perf_data, i); 503 + rc = acpi_processor_register_performance(perf, i); 504 + if (WARN_ON(rc)) 505 + goto err_out; 506 + } 507 + rc = acpi_processor_notify_smm(THIS_MODULE); 508 + if (WARN_ON(rc)) 509 + goto err_unregister; 510 + 511 + for_each_possible_cpu(i) { 512 + struct acpi_processor *_pr; 513 + _pr = per_cpu(processors, i /* APIC ID */); 514 + if (!_pr) 515 + continue; 516 + 517 + if (!pr_backup) { 518 + pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); 519 + memcpy(pr_backup, _pr, sizeof(struct acpi_processor)); 520 + } 521 + (void)upload_pm_data(_pr); 522 + } 523 + rc = check_acpi_ids(pr_backup); 524 + if (rc) 525 + goto err_unregister; 526 + 527 + kfree(pr_backup); 528 + 529 + return 0; 530 + err_unregister: 531 + for_each_possible_cpu(i) { 532 + struct acpi_processor_performance *perf; 533 + perf = per_cpu_ptr(acpi_perf_data, i); 534 + acpi_processor_unregister_performance(perf, i); 535 + } 536 + err_out: 537 + /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ 538 + free_acpi_perf_data(); 539 + kfree(acpi_ids_done); 540 + return rc; 541 + } 542 + static void __exit xen_acpi_processor_exit(void) 543 + { 544 + int i; 545 + 546 + kfree(acpi_ids_done); 547 + for_each_possible_cpu(i) { 548 + struct acpi_processor_performance *perf; 549 + perf = per_cpu_ptr(acpi_perf_data, i); 550 + acpi_processor_unregister_performance(perf, i); 551 + } 552 + free_acpi_perf_data(); 553 + } 554 + 555 + MODULE_AUTHOR("Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>"); 556 + MODULE_DESCRIPTION("Xen ACPI Processor P-states (and Cx) driver which uploads PM data to Xen hypervisor"); 557 + MODULE_LICENSE("GPL"); 558 + 559 + /* We want to be loaded before the CPU freq scaling drivers are loaded. 560 + * They are loaded in late_initcall. 
*/ 561 + device_initcall(xen_acpi_processor_init); 562 + module_exit(xen_acpi_processor_exit);
+1 -1
drivers/xen/xen-balloon.c
··· 207 207 NULL 208 208 }; 209 209 210 - static struct attribute_group balloon_info_group = { 210 + static const struct attribute_group balloon_info_group = { 211 211 .name = "info", 212 212 .attrs = balloon_info_attrs 213 213 };
+38 -3
drivers/xen/xen-pciback/pci_stub.c
··· 85 85 static void pcistub_device_release(struct kref *kref) 86 86 { 87 87 struct pcistub_device *psdev; 88 + struct xen_pcibk_dev_data *dev_data; 88 89 89 90 psdev = container_of(kref, struct pcistub_device, kref); 91 + dev_data = pci_get_drvdata(psdev->dev); 90 92 91 93 dev_dbg(&psdev->dev->dev, "pcistub_device_release\n"); 92 94 93 95 xen_unregister_device_domain_owner(psdev->dev); 94 96 95 - /* Clean-up the device */ 97 + /* Call the reset function which does not take lock as this 98 + * is called from "unbind" which takes a device_lock mutex. 99 + */ 100 + __pci_reset_function_locked(psdev->dev); 101 + if (pci_load_and_free_saved_state(psdev->dev, 102 + &dev_data->pci_saved_state)) { 103 + dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n"); 104 + } else 105 + pci_restore_state(psdev->dev); 106 + 107 + /* Disable the device */ 96 108 xen_pcibk_reset_device(psdev->dev); 109 + 110 + kfree(dev_data); 111 + pci_set_drvdata(psdev->dev, NULL); 112 + 113 + /* Clean-up the device */ 97 114 xen_pcibk_config_free_dyn_fields(psdev->dev); 98 115 xen_pcibk_config_free_dev(psdev->dev); 99 - kfree(pci_get_drvdata(psdev->dev)); 100 - pci_set_drvdata(psdev->dev, NULL); 101 116 102 117 psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; 103 118 pci_dev_put(psdev->dev); ··· 246 231 /* Cleanup our device 247 232 * (so it's ready for the next domain) 248 233 */ 234 + 235 + /* This is OK - we are running from workqueue context 236 + * and want to inhibit the user from fiddling with 'reset' 237 + */ 238 + pci_reset_function(dev); 239 + pci_restore_state(psdev->dev); 240 + 241 + /* This disables the device. */ 249 242 xen_pcibk_reset_device(found_psdev->dev); 243 + 244 + /* And cleanup up our emulated fields. 
*/ 250 245 xen_pcibk_config_free_dyn_fields(found_psdev->dev); 251 246 xen_pcibk_config_reset_dev(found_psdev->dev); 252 247 ··· 352 327 err = pci_enable_device(dev); 353 328 if (err) 354 329 goto config_release; 330 + 331 + dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); 332 + __pci_reset_function_locked(dev); 333 + 334 + /* We need the device active to save the state. */ 335 + dev_dbg(&dev->dev, "save state of device\n"); 336 + pci_save_state(dev); 337 + dev_data->pci_saved_state = pci_store_saved_state(dev); 338 + if (!dev_data->pci_saved_state) 339 + dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); 355 340 356 341 /* Now disable the device (this also ensures some private device 357 342 * data is setup before we export)
+1
drivers/xen/xen-pciback/pciback.h
··· 41 41 42 42 struct xen_pcibk_dev_data { 43 43 struct list_head config_fields; 44 + struct pci_saved_state *pci_saved_state; 44 45 unsigned int permissive:1; 45 46 unsigned int warned_on_write:1; 46 47 unsigned int enable_intx:1;
+1 -1
drivers/xen/xen-selfballoon.c
··· 488 488 NULL 489 489 }; 490 490 491 - static struct attribute_group selfballoon_group = { 491 + static const struct attribute_group selfballoon_group = { 492 492 .name = "selfballoon", 493 493 .attrs = selfballoon_attrs 494 494 };
+3 -3
drivers/xen/xenbus/xenbus_client.c
··· 569 569 { 570 570 struct gnttab_map_grant_ref op; 571 571 572 - gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref, 572 + gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, 573 573 dev->otherend_id); 574 574 575 575 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) ··· 662 662 goto found; 663 663 } 664 664 } 665 - node = NULL; 665 + node = addr = NULL; 666 666 found: 667 667 spin_unlock(&xenbus_valloc_lock); 668 668 ··· 698 698 { 699 699 struct gnttab_unmap_grant_ref op; 700 700 701 - gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle); 701 + gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); 702 702 703 703 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) 704 704 BUG();
+2 -1
drivers/xen/xenbus/xenbus_probe.c
··· 257 257 DPRINTK("%s", dev->nodename); 258 258 259 259 free_otherend_watch(dev); 260 - free_otherend_details(dev); 261 260 262 261 if (drv->remove) 263 262 drv->remove(dev); 263 + 264 + free_otherend_details(dev); 264 265 265 266 xenbus_switch_state(dev, XenbusStateClosed); 266 267 return 0;
+6
drivers/xen/xenbus/xenbus_probe_frontend.c
··· 53 53 char *nodename; 54 54 int err; 55 55 56 + /* ignore console/0 */ 57 + if (!strncmp(type, "console", 7) && !strncmp(name, "0", 1)) { 58 + DPRINTK("Ignoring buggy device entry console/0"); 59 + return 0; 60 + } 61 + 56 62 nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); 57 63 if (!nodename) 58 64 return -ENOMEM;
+1
include/linux/pci.h
··· 817 817 int pcie_get_mps(struct pci_dev *dev); 818 818 int pcie_set_mps(struct pci_dev *dev, int mps); 819 819 int __pci_reset_function(struct pci_dev *dev); 820 + int __pci_reset_function_locked(struct pci_dev *dev); 820 821 int pci_reset_function(struct pci_dev *dev); 821 822 void pci_update_resource(struct pci_dev *dev, int resno); 822 823 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
+5 -1
include/xen/interface/hvm/params.h
··· 90 90 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ 91 91 #define HVM_PARAM_VPT_ALIGN 16 92 92 93 - #define HVM_NR_PARAMS 17 93 + /* Console debug shared memory ring and event channel */ 94 + #define HVM_PARAM_CONSOLE_PFN 17 95 + #define HVM_PARAM_CONSOLE_EVTCHN 18 96 + 97 + #define HVM_NR_PARAMS 19 94 98 95 99 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
+7
include/xen/interface/physdev.h
··· 145 145 uint8_t devfn; 146 146 }; 147 147 148 + #define PHYSDEVOP_restore_msi 19 149 + struct physdev_restore_msi { 150 + /* IN */ 151 + uint8_t bus; 152 + uint8_t devfn; 153 + }; 154 + 148 155 #define PHYSDEVOP_manage_pci_add_ext 20 149 156 struct physdev_manage_pci_ext { 150 157 /* IN */
+19 -1
include/xen/interface/platform.h
··· 200 200 #define XEN_PM_CX 0 201 201 #define XEN_PM_PX 1 202 202 #define XEN_PM_TX 2 203 - 203 + #define XEN_PM_PDC 3 204 204 /* Px sub info type */ 205 205 #define XEN_PX_PCT 1 206 206 #define XEN_PX_PSS 2 ··· 293 293 union { 294 294 struct xen_processor_power power;/* Cx: _CST/_CSD */ 295 295 struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ 296 + GUEST_HANDLE(uint32_t) pdc; 296 297 }; 297 298 }; 298 299 DEFINE_GUEST_HANDLE_STRUCT(xenpf_set_processor_pminfo); 300 + 301 + #define XENPF_get_cpuinfo 55 302 + struct xenpf_pcpuinfo { 303 + /* IN */ 304 + uint32_t xen_cpuid; 305 + /* OUT */ 306 + /* The maxium cpu_id that is present */ 307 + uint32_t max_present; 308 + #define XEN_PCPU_FLAGS_ONLINE 1 309 + /* Correponding xen_cpuid is not present*/ 310 + #define XEN_PCPU_FLAGS_INVALID 2 311 + uint32_t flags; 312 + uint32_t apic_id; 313 + uint32_t acpi_id; 314 + }; 315 + DEFINE_GUEST_HANDLE_STRUCT(xenpf_pcpuinfo); 299 316 300 317 struct xen_platform_op { 301 318 uint32_t cmd; ··· 329 312 struct xenpf_change_freq change_freq; 330 313 struct xenpf_getidletime getidletime; 331 314 struct xenpf_set_processor_pminfo set_pminfo; 315 + struct xenpf_pcpuinfo pcpu_info; 332 316 uint8_t pad[128]; 333 317 } u; 334 318 };