Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'stable/for-linus-3.12-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen fixes from Konrad Rzeszutek Wilk:
"Bug-fixes and one update to the kernel-parameters.txt documentation.

- Fix PV spinlocks triggering jump_label code bug
- Remove extraneous code in the tpm front driver
- Fix ballooning out of pages when non-preemptible
- Fix deadlock when using a 32-bit initial domain with large amount
of memory
- Add xen_nopvspin parameter to the documentation"

* tag 'stable/for-linus-3.12-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/spinlock: Document the xen_nopvspin parameter.
xen/p2m: check MFN is in range before using the m2p table
xen/balloon: don't alloc page while non-preemptible
xen: Do not enable spinlocks before jump_label_init() has executed
tpm: xen-tpmfront: Remove the locality sysfs attribute
tpm: xen-tpmfront: Fix default durations

+63 -67
+4
Documentation/kernel-parameters.txt
··· 3485 3485 the unplug protocol 3486 3486 never -- do not unplug even if version check succeeds 3487 3487 3488 + xen_nopvspin [X86,XEN] 3489 + Disables the ticketlock slowpath using Xen PV 3490 + optimizations. 3491 + 3488 3492 xirc2ps_cs= [NET,PCMCIA] 3489 3493 Format: 3490 3494 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+20 -11
arch/x86/include/asm/xen/page.h
··· 79 79 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY; 80 80 } 81 81 82 - static inline unsigned long mfn_to_pfn(unsigned long mfn) 82 + static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn) 83 83 { 84 84 unsigned long pfn; 85 - int ret = 0; 85 + int ret; 86 86 87 87 if (xen_feature(XENFEAT_auto_translated_physmap)) 88 88 return mfn; 89 89 90 - if (unlikely(mfn >= machine_to_phys_nr)) { 91 - pfn = ~0; 92 - goto try_override; 93 - } 94 - pfn = 0; 90 + if (unlikely(mfn >= machine_to_phys_nr)) 91 + return ~0; 92 + 95 93 /* 96 94 * The array access can fail (e.g., device space beyond end of RAM). 97 95 * In such cases it doesn't matter what we return (we return garbage), 98 96 * but we must handle the fault without crashing! 99 97 */ 100 98 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 101 - try_override: 102 - /* ret might be < 0 if there are no entries in the m2p for mfn */ 103 99 if (ret < 0) 104 - pfn = ~0; 105 - else if (get_phys_to_machine(pfn) != mfn) 100 + return ~0; 101 + 102 + return pfn; 103 + } 104 + 105 + static inline unsigned long mfn_to_pfn(unsigned long mfn) 106 + { 107 + unsigned long pfn; 108 + 109 + if (xen_feature(XENFEAT_auto_translated_physmap)) 110 + return mfn; 111 + 112 + pfn = mfn_to_pfn_no_overrides(mfn); 113 + if (get_phys_to_machine(pfn) != mfn) { 106 114 /* 107 115 * If this appears to be a foreign mfn (because the pfn 108 116 * doesn't map back to the mfn), then check the local override ··· 119 111 * m2p_find_override_pfn returns ~0 if it doesn't find anything. 120 112 */ 121 113 pfn = m2p_find_override_pfn(mfn, ~0); 114 + } 122 115 123 116 /* 124 117 * pfn is ~0 if there are no entries in the m2p for mfn or if the
+4 -6
arch/x86/xen/p2m.c
··· 879 879 unsigned long uninitialized_var(address); 880 880 unsigned level; 881 881 pte_t *ptep = NULL; 882 - int ret = 0; 883 882 884 883 pfn = page_to_pfn(page); 885 884 if (!PageHighMem(page)) { ··· 925 926 * frontend pages while they are being shared with the backend, 926 927 * because mfn_to_pfn (that ends up being called by GUPF) will 927 928 * return the backend pfn rather than the frontend pfn. */ 928 - ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 929 - if (ret == 0 && get_phys_to_machine(pfn) == mfn) 929 + pfn = mfn_to_pfn_no_overrides(mfn); 930 + if (get_phys_to_machine(pfn) == mfn) 930 931 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); 931 932 932 933 return 0; ··· 941 942 unsigned long uninitialized_var(address); 942 943 unsigned level; 943 944 pte_t *ptep = NULL; 944 - int ret = 0; 945 945 946 946 pfn = page_to_pfn(page); 947 947 mfn = get_phys_to_machine(pfn); ··· 1027 1029 * the original pfn causes mfn_to_pfn(mfn) to return the frontend 1028 1030 * pfn again. */ 1029 1031 mfn &= ~FOREIGN_FRAME_BIT; 1030 - ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 1031 - if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && 1032 + pfn = mfn_to_pfn_no_overrides(mfn); 1033 + if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && 1032 1034 m2p_find_override(mfn) == NULL) 1033 1035 set_phys_to_machine(pfn, mfn); 1034 1036
+24 -2
arch/x86/xen/spinlock.c
··· 259 259 } 260 260 261 261 262 + /* 263 + * Our init of PV spinlocks is split in two init functions due to us 264 + * using paravirt patching and jump labels patching and having to do 265 + * all of this before SMP code is invoked. 266 + * 267 + * The paravirt patching needs to be done _before_ the alternative asm code 268 + * is started, otherwise we would not patch the core kernel code. 269 + */ 262 270 void __init xen_init_spinlocks(void) 263 271 { 264 272 ··· 275 267 return; 276 268 } 277 269 278 - static_key_slow_inc(&paravirt_ticketlocks_enabled); 279 - 280 270 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); 281 271 pv_lock_ops.unlock_kick = xen_unlock_kick; 282 272 } 273 + 274 + /* 275 + * While the jump_label init code needs to happend _after_ the jump labels are 276 + * enabled and before SMP is started. Hence we use pre-SMP initcall level 277 + * init. We cannot do it in xen_init_spinlocks as that is done before 278 + * jump labels are activated. 279 + */ 280 + static __init int xen_init_spinlocks_jump(void) 281 + { 282 + if (!xen_pvspin) 283 + return 0; 284 + 285 + static_key_slow_inc(&paravirt_ticketlocks_enabled); 286 + return 0; 287 + } 288 + early_initcall(xen_init_spinlocks_jump); 283 289 284 290 static __init int xen_parse_nopvspin(char *arg) 285 291 {
-36
drivers/char/tpm/xen-tpmfront.c
··· 142 142 return length; 143 143 } 144 144 145 - ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr, 146 - char *buf) 147 - { 148 - struct tpm_chip *chip = dev_get_drvdata(dev); 149 - struct tpm_private *priv = TPM_VPRIV(chip); 150 - u8 locality = priv->shr->locality; 151 - 152 - return sprintf(buf, "%d\n", locality); 153 - } 154 - 155 - ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr, 156 - const char *buf, size_t len) 157 - { 158 - struct tpm_chip *chip = dev_get_drvdata(dev); 159 - struct tpm_private *priv = TPM_VPRIV(chip); 160 - u8 val; 161 - 162 - int rv = kstrtou8(buf, 0, &val); 163 - if (rv) 164 - return rv; 165 - 166 - priv->shr->locality = val; 167 - 168 - return len; 169 - } 170 - 171 145 static const struct file_operations vtpm_ops = { 172 146 .owner = THIS_MODULE, 173 147 .llseek = no_llseek, ··· 162 188 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 163 189 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); 164 190 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); 165 - static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality, 166 - tpm_store_locality); 167 191 168 192 static struct attribute *vtpm_attrs[] = { 169 193 &dev_attr_pubek.attr, ··· 174 202 &dev_attr_cancel.attr, 175 203 &dev_attr_durations.attr, 176 204 &dev_attr_timeouts.attr, 177 - &dev_attr_locality.attr, 178 205 NULL, 179 206 }; 180 207 181 208 static struct attribute_group vtpm_attr_grp = { 182 209 .attrs = vtpm_attrs, 183 210 }; 184 - 185 - #define TPM_LONG_TIMEOUT (10 * 60 * HZ) 186 211 187 212 static const struct tpm_vendor_specific tpm_vtpm = { 188 213 .status = vtpm_status, ··· 192 223 .attr_group = &vtpm_attr_grp, 193 224 .miscdev = { 194 225 .fops = &vtpm_ops, 195 - }, 196 - .duration = { 197 - TPM_LONG_TIMEOUT, 198 - TPM_LONG_TIMEOUT, 199 - TPM_LONG_TIMEOUT, 200 226 }, 201 227 }; 202 228
+11 -12
drivers/xen/balloon.c
··· 398 398 if (nr_pages > ARRAY_SIZE(frame_list)) 399 399 nr_pages = ARRAY_SIZE(frame_list); 400 400 401 - scratch_page = get_balloon_scratch_page(); 402 - 403 401 for (i = 0; i < nr_pages; i++) { 404 402 page = alloc_page(gfp); 405 403 if (page == NULL) { ··· 411 413 412 414 scrub_page(page); 413 415 416 + /* 417 + * Ballooned out frames are effectively replaced with 418 + * a scratch frame. Ensure direct mappings and the 419 + * p2m are consistent. 420 + */ 421 + scratch_page = get_balloon_scratch_page(); 414 422 #ifdef CONFIG_XEN_HAVE_PVMMU 415 423 if (xen_pv_domain() && !PageHighMem(page)) { 416 424 ret = HYPERVISOR_update_va_mapping( ··· 426 422 BUG_ON(ret); 427 423 } 428 424 #endif 429 - } 430 - 431 - /* Ensure that ballooned highmem pages don't have kmaps. */ 432 - kmap_flush_unused(); 433 - flush_tlb_all(); 434 - 435 - /* No more mappings: invalidate P2M and add to balloon. */ 436 - for (i = 0; i < nr_pages; i++) { 437 - pfn = mfn_to_pfn(frame_list[i]); 438 425 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 439 426 unsigned long p; 440 427 p = page_to_pfn(scratch_page); 441 428 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 442 429 } 430 + put_balloon_scratch_page(); 431 + 443 432 balloon_append(pfn_to_page(pfn)); 444 433 } 445 434 446 - put_balloon_scratch_page(); 435 + /* Ensure that ballooned highmem pages don't have kmaps. */ 436 + kmap_flush_unused(); 437 + flush_tlb_all(); 447 438 448 439 set_xen_guest_handle(reservation.extent_start, frame_list); 449 440 reservation.nr_extents = nr_pages;