Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branches 'upstream/xenfs' and 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen

* 'upstream/xenfs' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen:
xen/privcmd: make privcmd visible in domU
xen/privcmd: move remap_domain_mfn_range() to core xen code and export.
privcmd: MMAPBATCH: Fix error handling/reporting
xenbus: export xen_store_interface for xenfs
xen/privcmd: make sure vma is ours before doing anything to it
xen/privcmd: print SIGBUS faults
xen/xenfs: set_page_dirty is supposed to return true if it dirties
xen/privcmd: create address space to allow writable mmaps
xen: add privcmd driver
xen: add variable hypercall caller
xen: add xen_set_domain_pte()
xen: add /proc/xen/xsd_{kva,port} to xenfs

* 'upstream/core' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen: (29 commits)
xen: include xen/xen.h for definition of xen_initial_domain()
xen: use host E820 map for dom0
xen: correctly rebuild mfn list list after migration.
xen: improvements to VIRQ_DEBUG output
xen: set up IRQ before binding virq to evtchn
xen: ensure that all event channels start off bound to VCPU 0
xen/hvc: only notify if we actually sent something
xen: don't add extra_pages for RAM after mem_end
xen: add support for PAT
xen: make sure xen_max_p2m_pfn is up to date
xen: limit extra memory to a certain ratio of base
xen: add extra pages for E820 RAM regions, even if beyond mem_end
xen: make sure xen_extra_mem_start is beyond all non-RAM e820
xen: implement "extra" memory to reserve space for pages not present at boot
xen: Use host-provided E820 map
xen: don't map missing memory
xen: defer building p2m mfn structures until kernel is mapped
xen: add return value to set_phys_to_machine()
xen: convert p2m to a 3 level tree
xen: make install_p2mtop_page() static
...

Fix up trivial conflict in arch/x86/xen/mmu.c, and fix the use of
'reserve_early()' - in the new memblock world order it is now
'memblock_x86_reserve_range()' instead. Pointed out by Jeremy.
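
The privcmd driver added on the 'upstream/xenfs' branch exposes hypercalls to the dom0 toolstack through an ioctl on the xenfs-provided "privcmd" node (conventionally /proc/xen/privcmd, per the comment in the new include/xen/privcmd.h further down). Below is a minimal userspace sketch of IOCTL_PRIVCMD_HYPERCALL using the structures from that header; the mount point and the __HYPERVISOR_xen_version/XENVER_version constants are assumptions taken from the Xen interface headers, not part of this diff.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <xen/privcmd.h>   /* struct privcmd_hypercall, IOCTL_PRIVCMD_HYPERCALL */

    #define __HYPERVISOR_xen_version 17   /* assumed, from xen/interface/xen.h */
    #define XENVER_version            0   /* assumed, from xen/interface/version.h */

    int main(void)
    {
        struct privcmd_hypercall call = {
            .op  = __HYPERVISOR_xen_version,
            .arg = { XENVER_version, 0, 0, 0, 0 },
        };
        long ver;
        int fd = open("/proc/xen/privcmd", O_RDWR);   /* assumes xenfs on /proc/xen */

        if (fd < 0) {
            perror("open /proc/xen/privcmd");
            return 1;
        }

        /* privcmd_ioctl_hypercall() returns whatever the hypercall returned. */
        ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        if (ver < 0)
            perror("IOCTL_PRIVCMD_HYPERCALL");
        else
            printf("Xen version %ld.%ld\n", ver >> 16, ver & 0xffff);

        close(fd);
        return 0;
    }
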

+1334 -139
+17
arch/x86/include/asm/xen/hypercall.h
··· 200 200 (type)__res; \ 201 201 }) 202 202 203 + static inline long 204 + privcmd_call(unsigned call, 205 + unsigned long a1, unsigned long a2, 206 + unsigned long a3, unsigned long a4, 207 + unsigned long a5) 208 + { 209 + __HYPERCALL_DECLS; 210 + __HYPERCALL_5ARG(a1, a2, a3, a4, a5); 211 + 212 + asm volatile("call *%[call]" 213 + : __HYPERCALL_5PARAM 214 + : [call] "a" (&hypercall_page[call]) 215 + : __HYPERCALL_CLOBBER5); 216 + 217 + return (long)__res; 218 + } 219 + 203 220 static inline int 204 221 HYPERVISOR_set_trap_table(struct trap_info *table) 205 222 {
+10 -2
arch/x86/include/asm/xen/page.h
··· 37 37 38 38 39 39 extern unsigned long get_phys_to_machine(unsigned long pfn); 40 - extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn); 40 + extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); 41 41 42 42 static inline unsigned long pfn_to_mfn(unsigned long pfn) 43 43 { 44 + unsigned long mfn; 45 + 44 46 if (xen_feature(XENFEAT_auto_translated_physmap)) 45 47 return pfn; 46 48 47 - return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT; 49 + mfn = get_phys_to_machine(pfn); 50 + 51 + if (mfn != INVALID_P2M_ENTRY) 52 + mfn &= ~FOREIGN_FRAME_BIT; 53 + 54 + return mfn; 48 55 } 49 56 50 57 static inline int phys_to_machine_mapping_valid(unsigned long pfn) ··· 166 159 167 160 #define pgd_val_ma(x) ((x).pgd) 168 161 162 + void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid); 169 163 170 164 xmaddr_t arbitrary_virt_to_machine(void *address); 171 165 unsigned long arbitrary_virt_to_mfn(void *vaddr);
+4 -7
arch/x86/xen/Kconfig
··· 19 19 depends on X86_LOCAL_APIC 20 20 21 21 config XEN_MAX_DOMAIN_MEMORY 22 - int "Maximum allowed size of a domain in gigabytes" 23 - default 8 if X86_32 24 - default 32 if X86_64 22 + int 23 + default 128 25 24 depends on XEN 26 25 help 27 - The pseudo-physical to machine address array is sized 28 - according to the maximum possible memory size of a Xen 29 - domain. This array uses 1 page per gigabyte, so there's no 30 - need to be too stingy here. 26 + This only affects the sizing of some bss arrays, the unused 27 + portions of which are freed. 31 28 32 29 config XEN_SAVE_RESTORE 33 30 bool
+8 -8
arch/x86/xen/enlighten.c
··· 136 136 info.mfn = arbitrary_virt_to_mfn(vcpup); 137 137 info.offset = offset_in_page(vcpup); 138 138 139 - printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n", 140 - cpu, vcpup, info.mfn, info.offset); 141 - 142 139 /* Check to see if the hypervisor will put the vcpu_info 143 140 structure where we want it, which allows direct access via 144 141 a percpu-variable. */ ··· 149 152 /* This cpu is using the registered vcpu info, even if 150 153 later ones fail to. */ 151 154 per_cpu(xen_vcpu, cpu) = vcpup; 152 - 153 - printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n", 154 - cpu, vcpup); 155 155 } 156 156 } 157 157 ··· 830 836 Xen console noise. */ 831 837 break; 832 838 839 + case MSR_IA32_CR_PAT: 840 + if (smp_processor_id() == 0) 841 + xen_set_pat(((u64)high << 32) | low); 842 + break; 843 + 833 844 default: 834 845 ret = native_write_msr_safe(msr, low, high); 835 846 } ··· 873 874 /* xen_vcpu_setup managed to place the vcpu_info within the 874 875 percpu area for all cpus, so make use of it */ 875 876 if (have_vcpu_info_placement) { 876 - printk(KERN_INFO "Xen: using vcpu_info placement\n"); 877 - 878 877 pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); 879 878 pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct); 880 879 pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); ··· 1185 1188 1186 1189 xen_raw_console_write("mapping kernel into physical memory\n"); 1187 1190 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 1191 + 1192 + /* Allocate and initialize top and mid mfn levels for p2m structure */ 1193 + xen_build_mfn_list_list(); 1188 1194 1189 1195 init_mm.pgd = pgd; 1190 1196
+428 -79
arch/x86/xen/mmu.c
··· 57 57 #include <asm/linkage.h> 58 58 #include <asm/page.h> 59 59 #include <asm/init.h> 60 + #include <asm/pat.h> 60 61 61 62 #include <asm/xen/hypercall.h> 62 63 #include <asm/xen/hypervisor.h> ··· 141 140 * large enough to allocate page table pages to allocate the rest. 142 141 * Each page can map 2MB. 143 142 */ 144 - static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss; 143 + #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4) 144 + static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES); 145 145 146 146 #ifdef CONFIG_X86_64 147 147 /* l3 pud for userspace vsyscall mapping */ ··· 173 171 */ 174 172 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK) 175 173 174 + /* 175 + * Xen leaves the responsibility for maintaining p2m mappings to the 176 + * guests themselves, but it must also access and update the p2m array 177 + * during suspend/resume when all the pages are reallocated. 178 + * 179 + * The p2m table is logically a flat array, but we implement it as a 180 + * three-level tree to allow the address space to be sparse. 181 + * 182 + * Xen 183 + * | 184 + * p2m_top p2m_top_mfn 185 + * / \ / \ 186 + * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn 187 + * / \ / \ / / 188 + * p2m p2m p2m p2m p2m p2m p2m ... 189 + * 190 + * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. 191 + * 192 + * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the 193 + * maximum representable pseudo-physical address space is: 194 + * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages 195 + * 196 + * P2M_PER_PAGE depends on the architecture, as a mfn is always 197 + * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to 198 + * 512 and 1024 entries respectively. 199 + */ 176 200 177 - #define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) 178 - #define TOP_ENTRIES (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE) 201 + unsigned long xen_max_p2m_pfn __read_mostly; 179 202 180 - /* Placeholder for holes in the address space */ 181 - static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data = 182 - { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL }; 203 + #define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) 204 + #define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *)) 205 + #define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **)) 183 206 184 - /* Array of pointers to pages containing p2m entries */ 185 - static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data = 186 - { [ 0 ... 
TOP_ENTRIES - 1] = &p2m_missing[0] }; 207 + #define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) 187 208 188 - /* Arrays of p2m arrays expressed in mfns used for save/restore */ 189 - static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss; 209 + /* Placeholders for holes in the address space */ 210 + static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); 211 + static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); 212 + static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE); 190 213 191 - static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE] 192 - __page_aligned_bss; 214 + static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); 215 + static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); 216 + static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); 217 + 218 + RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); 219 + RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); 193 220 194 221 static inline unsigned p2m_top_index(unsigned long pfn) 195 222 { 196 - BUG_ON(pfn >= MAX_DOMAIN_PAGES); 197 - return pfn / P2M_ENTRIES_PER_PAGE; 223 + BUG_ON(pfn >= MAX_P2M_PFN); 224 + return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); 225 + } 226 + 227 + static inline unsigned p2m_mid_index(unsigned long pfn) 228 + { 229 + return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; 198 230 } 199 231 200 232 static inline unsigned p2m_index(unsigned long pfn) 201 233 { 202 - return pfn % P2M_ENTRIES_PER_PAGE; 234 + return pfn % P2M_PER_PAGE; 203 235 } 204 236 205 - /* Build the parallel p2m_top_mfn structures */ 237 + static void p2m_top_init(unsigned long ***top) 238 + { 239 + unsigned i; 240 + 241 + for (i = 0; i < P2M_TOP_PER_PAGE; i++) 242 + top[i] = p2m_mid_missing; 243 + } 244 + 245 + static void p2m_top_mfn_init(unsigned long *top) 246 + { 247 + unsigned i; 248 + 249 + for (i = 0; i < P2M_TOP_PER_PAGE; i++) 250 + top[i] = virt_to_mfn(p2m_mid_missing_mfn); 251 + } 252 + 253 + static void p2m_top_mfn_p_init(unsigned long **top) 254 + { 255 + unsigned i; 256 + 257 + for (i = 0; i < P2M_TOP_PER_PAGE; i++) 258 + top[i] = p2m_mid_missing_mfn; 259 + } 260 + 261 + static void p2m_mid_init(unsigned long **mid) 262 + { 263 + unsigned i; 264 + 265 + for (i = 0; i < P2M_MID_PER_PAGE; i++) 266 + mid[i] = p2m_missing; 267 + } 268 + 269 + static void p2m_mid_mfn_init(unsigned long *mid) 270 + { 271 + unsigned i; 272 + 273 + for (i = 0; i < P2M_MID_PER_PAGE; i++) 274 + mid[i] = virt_to_mfn(p2m_missing); 275 + } 276 + 277 + static void p2m_init(unsigned long *p2m) 278 + { 279 + unsigned i; 280 + 281 + for (i = 0; i < P2M_MID_PER_PAGE; i++) 282 + p2m[i] = INVALID_P2M_ENTRY; 283 + } 284 + 285 + /* 286 + * Build the parallel p2m_top_mfn and p2m_mid_mfn structures 287 + * 288 + * This is called both at boot time, and after resuming from suspend: 289 + * - At boot time we're called very early, and must use extend_brk() 290 + * to allocate memory. 291 + * 292 + * - After resume we're called from within stop_machine, but the mfn 293 + * tree should alreay be completely allocated. 
294 + */ 206 295 void xen_build_mfn_list_list(void) 207 296 { 208 - unsigned pfn, idx; 297 + unsigned long pfn; 209 298 210 - for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) { 211 - unsigned topidx = p2m_top_index(pfn); 299 + /* Pre-initialize p2m_top_mfn to be completely missing */ 300 + if (p2m_top_mfn == NULL) { 301 + p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); 302 + p2m_mid_mfn_init(p2m_mid_missing_mfn); 212 303 213 - p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]); 304 + p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); 305 + p2m_top_mfn_p_init(p2m_top_mfn_p); 306 + 307 + p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); 308 + p2m_top_mfn_init(p2m_top_mfn); 309 + } else { 310 + /* Reinitialise, mfn's all change after migration */ 311 + p2m_mid_mfn_init(p2m_mid_missing_mfn); 214 312 } 215 313 216 - for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) { 217 - unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; 218 - p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); 314 + for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { 315 + unsigned topidx = p2m_top_index(pfn); 316 + unsigned mididx = p2m_mid_index(pfn); 317 + unsigned long **mid; 318 + unsigned long *mid_mfn_p; 319 + 320 + mid = p2m_top[topidx]; 321 + mid_mfn_p = p2m_top_mfn_p[topidx]; 322 + 323 + /* Don't bother allocating any mfn mid levels if 324 + * they're just missing, just update the stored mfn, 325 + * since all could have changed over a migrate. 326 + */ 327 + if (mid == p2m_mid_missing) { 328 + BUG_ON(mididx); 329 + BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); 330 + p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); 331 + pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; 332 + continue; 333 + } 334 + 335 + if (mid_mfn_p == p2m_mid_missing_mfn) { 336 + /* 337 + * XXX boot-time only! We should never find 338 + * missing parts of the mfn tree after 339 + * runtime. extend_brk() will BUG if we call 340 + * it too late. 341 + */ 342 + mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); 343 + p2m_mid_mfn_init(mid_mfn_p); 344 + 345 + p2m_top_mfn_p[topidx] = mid_mfn_p; 346 + } 347 + 348 + p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); 349 + mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]); 219 350 } 220 351 } 221 352 ··· 357 222 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 358 223 359 224 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = 360 - virt_to_mfn(p2m_top_mfn_list); 361 - HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages; 225 + virt_to_mfn(p2m_top_mfn); 226 + HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; 362 227 } 363 228 364 229 /* Set up p2m_top to point to the domain-builder provided p2m pages */ ··· 366 231 { 367 232 unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; 368 233 unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); 369 - unsigned pfn; 234 + unsigned long pfn; 370 235 371 - for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) { 236 + xen_max_p2m_pfn = max_pfn; 237 + 238 + p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); 239 + p2m_init(p2m_missing); 240 + 241 + p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); 242 + p2m_mid_init(p2m_mid_missing); 243 + 244 + p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); 245 + p2m_top_init(p2m_top); 246 + 247 + /* 248 + * The domain builder gives us a pre-constructed p2m array in 249 + * mfn_list for all the pages initially given to us, so we just 250 + * need to graft that into our tree structure. 
251 + */ 252 + for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) { 372 253 unsigned topidx = p2m_top_index(pfn); 254 + unsigned mididx = p2m_mid_index(pfn); 373 255 374 - p2m_top[topidx] = &mfn_list[pfn]; 256 + if (p2m_top[topidx] == p2m_mid_missing) { 257 + unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); 258 + p2m_mid_init(mid); 259 + 260 + p2m_top[topidx] = mid; 261 + } 262 + 263 + p2m_top[topidx][mididx] = &mfn_list[pfn]; 375 264 } 376 - 377 - xen_build_mfn_list_list(); 378 265 } 379 266 380 267 unsigned long get_phys_to_machine(unsigned long pfn) 381 268 { 382 - unsigned topidx, idx; 269 + unsigned topidx, mididx, idx; 383 270 384 - if (unlikely(pfn >= MAX_DOMAIN_PAGES)) 271 + if (unlikely(pfn >= MAX_P2M_PFN)) 385 272 return INVALID_P2M_ENTRY; 386 273 387 274 topidx = p2m_top_index(pfn); 275 + mididx = p2m_mid_index(pfn); 388 276 idx = p2m_index(pfn); 389 - return p2m_top[topidx][idx]; 277 + 278 + return p2m_top[topidx][mididx][idx]; 390 279 } 391 280 EXPORT_SYMBOL_GPL(get_phys_to_machine); 392 281 393 - /* install a new p2m_top page */ 394 - bool install_p2mtop_page(unsigned long pfn, unsigned long *p) 282 + static void *alloc_p2m_page(void) 395 283 { 396 - unsigned topidx = p2m_top_index(pfn); 397 - unsigned long **pfnp, *mfnp; 398 - unsigned i; 399 - 400 - pfnp = &p2m_top[topidx]; 401 - mfnp = &p2m_top_mfn[topidx]; 402 - 403 - for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) 404 - p[i] = INVALID_P2M_ENTRY; 405 - 406 - if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) { 407 - *mfnp = virt_to_mfn(p); 408 - return true; 409 - } 410 - 411 - return false; 284 + return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); 412 285 } 413 286 414 - static void alloc_p2m(unsigned long pfn) 287 + static void free_p2m_page(void *p) 415 288 { 416 - unsigned long *p; 289 + free_page((unsigned long)p); 290 + } 417 291 418 - p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); 419 - BUG_ON(p == NULL); 292 + /* 293 + * Fully allocate the p2m structure for a given pfn. We need to check 294 + * that both the top and mid levels are allocated, and make sure the 295 + * parallel mfn tree is kept in sync. We may race with other cpus, so 296 + * the new pages are installed with cmpxchg; if we lose the race then 297 + * simply free the page we allocated and use the one that's there. 
298 + */ 299 + static bool alloc_p2m(unsigned long pfn) 300 + { 301 + unsigned topidx, mididx; 302 + unsigned long ***top_p, **mid; 303 + unsigned long *top_mfn_p, *mid_mfn; 420 304 421 - if (!install_p2mtop_page(pfn, p)) 422 - free_page((unsigned long)p); 305 + topidx = p2m_top_index(pfn); 306 + mididx = p2m_mid_index(pfn); 307 + 308 + top_p = &p2m_top[topidx]; 309 + mid = *top_p; 310 + 311 + if (mid == p2m_mid_missing) { 312 + /* Mid level is missing, allocate a new one */ 313 + mid = alloc_p2m_page(); 314 + if (!mid) 315 + return false; 316 + 317 + p2m_mid_init(mid); 318 + 319 + if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) 320 + free_p2m_page(mid); 321 + } 322 + 323 + top_mfn_p = &p2m_top_mfn[topidx]; 324 + mid_mfn = p2m_top_mfn_p[topidx]; 325 + 326 + BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); 327 + 328 + if (mid_mfn == p2m_mid_missing_mfn) { 329 + /* Separately check the mid mfn level */ 330 + unsigned long missing_mfn; 331 + unsigned long mid_mfn_mfn; 332 + 333 + mid_mfn = alloc_p2m_page(); 334 + if (!mid_mfn) 335 + return false; 336 + 337 + p2m_mid_mfn_init(mid_mfn); 338 + 339 + missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); 340 + mid_mfn_mfn = virt_to_mfn(mid_mfn); 341 + if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) 342 + free_p2m_page(mid_mfn); 343 + else 344 + p2m_top_mfn_p[topidx] = mid_mfn; 345 + } 346 + 347 + if (p2m_top[topidx][mididx] == p2m_missing) { 348 + /* p2m leaf page is missing */ 349 + unsigned long *p2m; 350 + 351 + p2m = alloc_p2m_page(); 352 + if (!p2m) 353 + return false; 354 + 355 + p2m_init(p2m); 356 + 357 + if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) 358 + free_p2m_page(p2m); 359 + else 360 + mid_mfn[mididx] = virt_to_mfn(p2m); 361 + } 362 + 363 + return true; 423 364 } 424 365 425 366 /* Try to install p2m mapping; fail if intermediate bits missing */ 426 367 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) 427 368 { 428 - unsigned topidx, idx; 369 + unsigned topidx, mididx, idx; 429 370 430 - if (unlikely(pfn >= MAX_DOMAIN_PAGES)) { 371 + if (unlikely(pfn >= MAX_P2M_PFN)) { 431 372 BUG_ON(mfn != INVALID_P2M_ENTRY); 432 373 return true; 433 374 } 434 375 435 376 topidx = p2m_top_index(pfn); 436 - if (p2m_top[topidx] == p2m_missing) { 437 - if (mfn == INVALID_P2M_ENTRY) 438 - return true; 439 - return false; 440 - } 441 - 377 + mididx = p2m_mid_index(pfn); 442 378 idx = p2m_index(pfn); 443 - p2m_top[topidx][idx] = mfn; 379 + 380 + if (p2m_top[topidx][mididx] == p2m_missing) 381 + return mfn == INVALID_P2M_ENTRY; 382 + 383 + p2m_top[topidx][mididx][idx] = mfn; 444 384 445 385 return true; 446 386 } 447 387 448 - void set_phys_to_machine(unsigned long pfn, unsigned long mfn) 388 + bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) 449 389 { 450 390 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { 451 391 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); 452 - return; 392 + return true; 453 393 } 454 394 455 395 if (unlikely(!__set_phys_to_machine(pfn, mfn))) { 456 - alloc_p2m(pfn); 396 + if (!alloc_p2m(pfn)) 397 + return false; 457 398 458 399 if (!__set_phys_to_machine(pfn, mfn)) 459 - BUG(); 400 + return false; 460 401 } 402 + 403 + return true; 461 404 } 462 405 463 406 unsigned long arbitrary_virt_to_mfn(void *vaddr) ··· 612 399 return pte_flags(pte) & _PAGE_IOMAP; 613 400 } 614 401 615 - static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval) 402 + void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) 616 403 { 617 404 struct multicall_space mcs; 618 
405 struct mmu_update *u; ··· 624 411 u->ptr = arbitrary_virt_to_machine(ptep).maddr; 625 412 u->val = pte_val_ma(pteval); 626 413 627 - MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO); 414 + MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); 628 415 629 416 xen_mc_issue(PARAVIRT_LAZY_MMU); 417 + } 418 + EXPORT_SYMBOL_GPL(xen_set_domain_pte); 419 + 420 + static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval) 421 + { 422 + xen_set_domain_pte(ptep, pteval, DOMID_IO); 630 423 } 631 424 632 425 static void xen_extend_mmu_update(const struct mmu_update *update) ··· 780 561 if (val & _PAGE_PRESENT) { 781 562 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 782 563 pteval_t flags = val & PTE_FLAGS_MASK; 783 - val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags; 564 + unsigned long mfn = pfn_to_mfn(pfn); 565 + 566 + /* 567 + * If there's no mfn for the pfn, then just create an 568 + * empty non-present pte. Unfortunately this loses 569 + * information about the original pfn, so 570 + * pte_mfn_to_pfn is asymmetric. 571 + */ 572 + if (unlikely(mfn == INVALID_P2M_ENTRY)) { 573 + mfn = 0; 574 + flags = 0; 575 + } 576 + 577 + val = ((pteval_t)mfn << PAGE_SHIFT) | flags; 784 578 } 785 579 786 580 return val; ··· 815 583 816 584 pteval_t xen_pte_val(pte_t pte) 817 585 { 818 - if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP)) 819 - return pte.pte; 586 + pteval_t pteval = pte.pte; 820 587 821 - return pte_mfn_to_pfn(pte.pte); 588 + /* If this is a WC pte, convert back from Xen WC to Linux WC */ 589 + if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { 590 + WARN_ON(!pat_enabled); 591 + pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; 592 + } 593 + 594 + if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) 595 + return pteval; 596 + 597 + return pte_mfn_to_pfn(pteval); 822 598 } 823 599 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); 824 600 ··· 836 596 } 837 597 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); 838 598 599 + /* 600 + * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7 601 + * are reserved for now, to correspond to the Intel-reserved PAT 602 + * types. 603 + * 604 + * We expect Linux's PAT set as follows: 605 + * 606 + * Idx PTE flags Linux Xen Default 607 + * 0 WB WB WB 608 + * 1 PWT WC WT WT 609 + * 2 PCD UC- UC- UC- 610 + * 3 PCD PWT UC UC UC 611 + * 4 PAT WB WC WB 612 + * 5 PAT PWT WC WP WT 613 + * 6 PAT PCD UC- UC UC- 614 + * 7 PAT PCD PWT UC UC UC 615 + */ 616 + 617 + void xen_set_pat(u64 pat) 618 + { 619 + /* We expect Linux to use a PAT setting of 620 + * UC UC- WC WB (ignoring the PAT flag) */ 621 + WARN_ON(pat != 0x0007010600070106ull); 622 + } 623 + 839 624 pte_t xen_make_pte(pteval_t pte) 840 625 { 841 626 phys_addr_t addr = (pte & PTE_PFN_MASK); 627 + 628 + /* If Linux is trying to set a WC pte, then map to the Xen WC. 629 + * If _PAGE_PAT is set, then it probably means it is really 630 + * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope 631 + * things work out OK... 632 + * 633 + * (We should never see kernel mappings with _PAGE_PSE set, 634 + * but we could see hugetlbfs mappings, I think.). 
635 + */ 636 + if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) { 637 + if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) 638 + pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; 639 + } 842 640 843 641 /* 844 642 * Unprivileged domains are allowed to do IOMAPpings for ··· 1990 1712 unsigned ident_pte; 1991 1713 unsigned long pfn; 1992 1714 1715 + level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES, 1716 + PAGE_SIZE); 1717 + 1993 1718 ident_pte = 0; 1994 1719 pfn = 0; 1995 1720 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { ··· 2003 1722 pte_page = m2v(pmd[pmdidx].pmd); 2004 1723 else { 2005 1724 /* Check for free pte pages */ 2006 - if (ident_pte == ARRAY_SIZE(level1_ident_pgt)) 1725 + if (ident_pte == LEVEL1_IDENT_ENTRIES) 2007 1726 break; 2008 1727 2009 1728 pte_page = &level1_ident_pgt[ident_pte]; ··· 2118 1837 return pgd; 2119 1838 } 2120 1839 #else /* !CONFIG_X86_64 */ 2121 - static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss; 1840 + static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD); 2122 1841 2123 1842 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, 2124 1843 unsigned long max_pfn) 2125 1844 { 2126 1845 pmd_t *kernel_pmd; 1846 + 1847 + level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE); 2127 1848 2128 1849 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + 2129 1850 xen_start_info->nr_pt_frames * PAGE_SIZE + ··· 2551 2268 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap; 2552 2269 } 2553 2270 #endif 2271 + 2272 + #define REMAP_BATCH_SIZE 16 2273 + 2274 + struct remap_data { 2275 + unsigned long mfn; 2276 + pgprot_t prot; 2277 + struct mmu_update *mmu_update; 2278 + }; 2279 + 2280 + static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, 2281 + unsigned long addr, void *data) 2282 + { 2283 + struct remap_data *rmd = data; 2284 + pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); 2285 + 2286 + rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr; 2287 + rmd->mmu_update->val = pte_val_ma(pte); 2288 + rmd->mmu_update++; 2289 + 2290 + return 0; 2291 + } 2292 + 2293 + int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2294 + unsigned long addr, 2295 + unsigned long mfn, int nr, 2296 + pgprot_t prot, unsigned domid) 2297 + { 2298 + struct remap_data rmd; 2299 + struct mmu_update mmu_update[REMAP_BATCH_SIZE]; 2300 + int batch; 2301 + unsigned long range; 2302 + int err = 0; 2303 + 2304 + prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); 2305 + 2306 + vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 2307 + 2308 + rmd.mfn = mfn; 2309 + rmd.prot = prot; 2310 + 2311 + while (nr) { 2312 + batch = min(REMAP_BATCH_SIZE, nr); 2313 + range = (unsigned long)batch << PAGE_SHIFT; 2314 + 2315 + rmd.mmu_update = mmu_update; 2316 + err = apply_to_page_range(vma->vm_mm, addr, range, 2317 + remap_area_mfn_pte_fn, &rmd); 2318 + if (err) 2319 + goto out; 2320 + 2321 + err = -EFAULT; 2322 + if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0) 2323 + goto out; 2324 + 2325 + nr -= batch; 2326 + addr += range; 2327 + } 2328 + 2329 + err = 0; 2330 + out: 2331 + 2332 + flush_tlb_all(); 2333 + 2334 + return err; 2335 + } 2336 + EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2554 2337 2555 2338 #ifdef CONFIG_XEN_DEBUG_FS 2556 2339
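
The comment block in the mmu.c hunk above describes the new three-level p2m tree. As a rough standalone sketch of the sizing and lookup it describes (not code from the patch; the 512-entries-per-page values assume 64-bit unsigned long and 4 KiB pages, giving 1024 per page on 32-bit instead):

    #include <stdio.h>

    /* Assumed 64-bit values: PAGE_SIZE = 4096, sizeof(unsigned long) = 8 */
    #define P2M_PER_PAGE      512UL   /* leaf entries per page */
    #define P2M_MID_PER_PAGE  512UL   /* mid pointers per page */
    #define P2M_TOP_PER_PAGE  512UL   /* top pointers per page */

    int main(void)
    {
        unsigned long pfn = 0x12345;   /* arbitrary example pfn */

        unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
        unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
        unsigned long idx    = pfn % P2M_PER_PAGE;

        /* Mirrors the p2m_top[topidx][mididx][idx] walk in get_phys_to_machine() */
        printf("pfn 0x%lx -> top %lu, mid %lu, leaf %lu\n", pfn, topidx, mididx, idx);

        /* 512^3 pages of 4 KiB each: the maximum pseudo-physical space is 512 GiB */
        printf("max pfns: %lu (~%lu GiB)\n",
               P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE,
               P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE * 4096UL >> 30);
        return 0;
    }
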
-1
arch/x86/xen/mmu.h
··· 12 12 13 13 14 14 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 15 - bool install_p2mtop_page(unsigned long pfn, unsigned long *p); 16 15 17 16 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 18 17
+109 -3
arch/x86/xen/setup.c
··· 18 18 #include <asm/xen/hypervisor.h> 19 19 #include <asm/xen/hypercall.h> 20 20 21 + #include <xen/xen.h> 21 22 #include <xen/page.h> 22 23 #include <xen/interface/callback.h> 24 + #include <xen/interface/memory.h> 23 25 #include <xen/interface/physdev.h> 24 26 #include <xen/interface/memory.h> 25 27 #include <xen/features.h> ··· 35 33 extern void xen_sysenter_target(void); 36 34 extern void xen_syscall_target(void); 37 35 extern void xen_syscall32_target(void); 36 + 37 + /* Amount of extra memory space we add to the e820 ranges */ 38 + phys_addr_t xen_extra_mem_start, xen_extra_mem_size; 39 + 40 + /* 41 + * The maximum amount of extra memory compared to the base size. The 42 + * main scaling factor is the size of struct page. At extreme ratios 43 + * of base:extra, all the base memory can be filled with page 44 + * structures for the extra memory, leaving no space for anything 45 + * else. 46 + * 47 + * 10x seems like a reasonable balance between scaling flexibility and 48 + * leaving a practically usable system. 49 + */ 50 + #define EXTRA_MEM_RATIO (10) 51 + 52 + static __init void xen_add_extra_mem(unsigned long pages) 53 + { 54 + u64 size = (u64)pages * PAGE_SIZE; 55 + u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; 56 + 57 + if (!pages) 58 + return; 59 + 60 + e820_add_region(extra_start, size, E820_RAM); 61 + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 62 + 63 + memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA"); 64 + 65 + xen_extra_mem_size += size; 66 + 67 + xen_max_p2m_pfn = PFN_DOWN(extra_start + size); 68 + } 38 69 39 70 static unsigned long __init xen_release_chunk(phys_addr_t start_addr, 40 71 phys_addr_t end_addr) ··· 140 105 /** 141 106 * machine_specific_memory_setup - Hook for machine specific memory setup. 142 107 **/ 143 - 144 108 char * __init xen_memory_setup(void) 145 109 { 110 + static struct e820entry map[E820MAX] __initdata; 111 + 146 112 unsigned long max_pfn = xen_start_info->nr_pages; 113 + unsigned long long mem_end; 114 + int rc; 115 + struct xen_memory_map memmap; 116 + unsigned long extra_pages = 0; 117 + unsigned long extra_limit; 118 + int i; 119 + int op; 147 120 148 121 max_pfn = min(MAX_DOMAIN_PAGES, max_pfn); 122 + mem_end = PFN_PHYS(max_pfn); 123 + 124 + memmap.nr_entries = E820MAX; 125 + set_xen_guest_handle(memmap.buffer, map); 126 + 127 + op = xen_initial_domain() ? 128 + XENMEM_machine_memory_map : 129 + XENMEM_memory_map; 130 + rc = HYPERVISOR_memory_op(op, &memmap); 131 + if (rc == -ENOSYS) { 132 + memmap.nr_entries = 1; 133 + map[0].addr = 0ULL; 134 + map[0].size = mem_end; 135 + /* 8MB slack (to balance backend allocations). */ 136 + map[0].size += 8ULL << 20; 137 + map[0].type = E820_RAM; 138 + rc = 0; 139 + } 140 + BUG_ON(rc); 149 141 150 142 e820.nr_map = 0; 143 + xen_extra_mem_start = mem_end; 144 + for (i = 0; i < memmap.nr_entries; i++) { 145 + unsigned long long end = map[i].addr + map[i].size; 151 146 152 - e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM); 147 + if (map[i].type == E820_RAM) { 148 + if (map[i].addr < mem_end && end > mem_end) { 149 + /* Truncate region to max_mem. 
*/ 150 + u64 delta = end - mem_end; 151 + 152 + map[i].size -= delta; 153 + extra_pages += PFN_DOWN(delta); 154 + 155 + end = mem_end; 156 + } 157 + } 158 + 159 + if (end > xen_extra_mem_start) 160 + xen_extra_mem_start = end; 161 + 162 + /* If region is non-RAM or below mem_end, add what remains */ 163 + if ((map[i].type != E820_RAM || map[i].addr < mem_end) && 164 + map[i].size > 0) 165 + e820_add_region(map[i].addr, map[i].size, map[i].type); 166 + } 153 167 154 168 /* 155 169 * Even though this is normal, usable memory under Xen, reserve ··· 220 136 221 137 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 222 138 223 - xen_return_unused_memory(xen_start_info->nr_pages, &e820); 139 + extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); 140 + 141 + /* 142 + * Clamp the amount of extra memory to a EXTRA_MEM_RATIO 143 + * factor the base size. On non-highmem systems, the base 144 + * size is the full initial memory allocation; on highmem it 145 + * is limited to the max size of lowmem, so that it doesn't 146 + * get completely filled. 147 + * 148 + * In principle there could be a problem in lowmem systems if 149 + * the initial memory is also very large with respect to 150 + * lowmem, but we won't try to deal with that here. 151 + */ 152 + extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), 153 + max_pfn + extra_pages); 154 + 155 + if (extra_limit >= max_pfn) 156 + extra_pages = extra_limit - max_pfn; 157 + else 158 + extra_pages = 0; 159 + 160 + if (!xen_initial_domain()) 161 + xen_add_extra_mem(extra_pages); 224 162 225 163 return "Xen"; 226 164 }
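
The setup.c hunk above clamps the deferred "extra" memory to EXTRA_MEM_RATIO times the usable base, so that struct page arrays for the extra region cannot eat the whole initial allocation. A small worked sketch of that clamp with made-up numbers (1 GiB of base memory, 3 GiB more offered by the host map; the MAXMEM term is simplified away as it would be on 64-bit):

    #include <stdio.h>

    #define EXTRA_MEM_RATIO 10UL

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        /* Example: base 1 GiB = 262144 pfns of 4 KiB; host E820 offers 3 GiB
         * more = 786432 pfns. On 64-bit MAXMEM dwarfs max_pfn, so the inner
         * min() reduces to max_pfn. */
        unsigned long max_pfn = 262144, extra_pages = 786432;
        unsigned long maxmem_pfn = ~0UL;   /* effectively unlimited here */

        unsigned long extra_limit =
            min_ul(EXTRA_MEM_RATIO * min_ul(max_pfn, maxmem_pfn),
                   max_pfn + extra_pages);

        extra_pages = (extra_limit >= max_pfn) ? extra_limit - max_pfn : 0;

        /* All 3 GiB fits under the 10x cap; with only 64 MiB of base memory
         * the same offer would be clamped to 9x the base (576 MiB). */
        printf("extra_pages = %lu (%lu MiB)\n", extra_pages, extra_pages * 4 / 1024);
        return 0;
    }
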
+3
arch/x86/xen/xen-ops.h
··· 30 30 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); 31 31 void xen_ident_map_ISA(void); 32 32 void xen_reserve_top(void); 33 + extern unsigned long xen_max_p2m_pfn; 34 + 35 + void xen_set_pat(u64); 33 36 34 37 char * __init xen_memory_setup(void); 35 38 void __init xen_arch_setup(void);
+2 -1
drivers/char/hvc_xen.c
··· 74 74 wmb(); /* write ring before updating pointer */ 75 75 intf->out_prod = prod; 76 76 77 - notify_daemon(); 77 + if (sent) 78 + notify_daemon(); 78 79 return sent; 79 80 } 80 81
+68 -31
drivers/xen/events.c
··· 261 261 } 262 262 #endif 263 263 264 - memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0))); 264 + memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); 265 265 } 266 266 267 267 static inline void clear_evtchn(int port) ··· 377 377 irq = find_unbound_irq(); 378 378 379 379 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 380 - handle_edge_irq, "event"); 380 + handle_fasteoi_irq, "event"); 381 381 382 382 evtchn_to_irq[evtchn] = irq; 383 383 irq_info[irq] = mk_evtchn_info(evtchn); ··· 435 435 irq = per_cpu(virq_to_irq, cpu)[virq]; 436 436 437 437 if (irq == -1) { 438 + irq = find_unbound_irq(); 439 + 440 + set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 441 + handle_percpu_irq, "virq"); 442 + 438 443 bind_virq.virq = virq; 439 444 bind_virq.vcpu = cpu; 440 445 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, 441 446 &bind_virq) != 0) 442 447 BUG(); 443 448 evtchn = bind_virq.port; 444 - 445 - irq = find_unbound_irq(); 446 - 447 - set_irq_chip_and_handler_name(irq, &xen_percpu_chip, 448 - handle_percpu_irq, "virq"); 449 449 450 450 evtchn_to_irq[evtchn] = irq; 451 451 irq_info[irq] = mk_virq_info(evtchn, virq); ··· 578 578 { 579 579 struct shared_info *sh = HYPERVISOR_shared_info; 580 580 int cpu = smp_processor_id(); 581 + unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu); 581 582 int i; 582 583 unsigned long flags; 583 584 static DEFINE_SPINLOCK(debug_lock); 585 + struct vcpu_info *v; 584 586 585 587 spin_lock_irqsave(&debug_lock, flags); 586 588 587 - printk("vcpu %d\n ", cpu); 589 + printk("\nvcpu %d\n ", cpu); 588 590 589 591 for_each_online_cpu(i) { 590 - struct vcpu_info *v = per_cpu(xen_vcpu, i); 591 - printk("%d: masked=%d pending=%d event_sel %08lx\n ", i, 592 - (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask, 593 - v->evtchn_upcall_pending, 594 - v->evtchn_pending_sel); 592 + int pending; 593 + v = per_cpu(xen_vcpu, i); 594 + pending = (get_irq_regs() && i == cpu) 595 + ? xen_irqs_disabled(get_irq_regs()) 596 + : v->evtchn_upcall_mask; 597 + printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i, 598 + pending, v->evtchn_upcall_pending, 599 + (int)(sizeof(v->evtchn_pending_sel)*2), 600 + v->evtchn_pending_sel); 595 601 } 596 - printk("pending:\n "); 597 - for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) 598 - printk("%08lx%s", sh->evtchn_pending[i], 599 - i % 8 == 0 ? "\n " : " "); 600 - printk("\nmasks:\n "); 601 - for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 602 - printk("%08lx%s", sh->evtchn_mask[i], 603 - i % 8 == 0 ? "\n " : " "); 602 + v = per_cpu(xen_vcpu, cpu); 604 603 605 - printk("\nunmasked:\n "); 606 - for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 607 - printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i], 608 - i % 8 == 0 ? "\n " : " "); 604 + printk("\npending:\n "); 605 + for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) 606 + printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2, 607 + sh->evtchn_pending[i], 608 + i % 8 == 0 ? "\n " : " "); 609 + printk("\nglobal mask:\n "); 610 + for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 611 + printk("%0*lx%s", 612 + (int)(sizeof(sh->evtchn_mask[0])*2), 613 + sh->evtchn_mask[i], 614 + i % 8 == 0 ? "\n " : " "); 615 + 616 + printk("\nglobally unmasked:\n "); 617 + for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 618 + printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2), 619 + sh->evtchn_pending[i] & ~sh->evtchn_mask[i], 620 + i % 8 == 0 ? 
"\n " : " "); 621 + 622 + printk("\nlocal cpu%d mask:\n ", cpu); 623 + for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--) 624 + printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2), 625 + cpu_evtchn[i], 626 + i % 8 == 0 ? "\n " : " "); 627 + 628 + printk("\nlocally unmasked:\n "); 629 + for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) { 630 + unsigned long pending = sh->evtchn_pending[i] 631 + & ~sh->evtchn_mask[i] 632 + & cpu_evtchn[i]; 633 + printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2), 634 + pending, i % 8 == 0 ? "\n " : " "); 635 + } 609 636 610 637 printk("\npending list:\n"); 611 - for(i = 0; i < NR_EVENT_CHANNELS; i++) { 638 + for (i = 0; i < NR_EVENT_CHANNELS; i++) { 612 639 if (sync_test_bit(i, sh->evtchn_pending)) { 613 - printk(" %d: event %d -> irq %d\n", 640 + int word_idx = i / BITS_PER_LONG; 641 + printk(" %d: event %d -> irq %d%s%s%s\n", 614 642 cpu_from_evtchn(i), i, 615 - evtchn_to_irq[i]); 643 + evtchn_to_irq[i], 644 + sync_test_bit(word_idx, &v->evtchn_pending_sel) 645 + ? "" : " l2-clear", 646 + !sync_test_bit(i, sh->evtchn_mask) 647 + ? "" : " globally-masked", 648 + sync_test_bit(i, cpu_evtchn) 649 + ? "" : " locally-masked"); 616 650 } 617 651 } 618 652 ··· 696 662 int port = (word_idx * BITS_PER_LONG) + bit_idx; 697 663 int irq = evtchn_to_irq[port]; 698 664 struct irq_desc *desc; 665 + 666 + mask_evtchn(port); 667 + clear_evtchn(port); 699 668 700 669 if (irq != -1) { 701 670 desc = irq_to_desc(irq); ··· 837 800 { 838 801 int evtchn = evtchn_from_irq(irq); 839 802 840 - move_native_irq(irq); 803 + move_masked_irq(irq); 841 804 842 805 if (VALID_EVTCHN(evtchn)) 843 - clear_evtchn(evtchn); 806 + unmask_evtchn(evtchn); 844 807 } 845 808 846 809 static int retrigger_dynirq(unsigned int irq) ··· 996 959 .mask = disable_dynirq, 997 960 .unmask = enable_dynirq, 998 961 999 - .ack = ack_dynirq, 962 + .eoi = ack_dynirq, 1000 963 .set_affinity = set_affinity_irq, 1001 964 .retrigger = retrigger_dynirq, 1002 965 };
+3 -1
drivers/xen/xenbus/xenbus_probe.c
··· 64 64 65 65 66 66 int xen_store_evtchn; 67 - EXPORT_SYMBOL(xen_store_evtchn); 67 + EXPORT_SYMBOL_GPL(xen_store_evtchn); 68 68 69 69 struct xenstore_domain_interface *xen_store_interface; 70 + EXPORT_SYMBOL_GPL(xen_store_interface); 71 + 70 72 static unsigned long xen_store_mfn; 71 73 72 74 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
+2 -1
drivers/xen/xenfs/Makefile
··· 1 1 obj-$(CONFIG_XENFS) += xenfs.o 2 2 3 - xenfs-objs = super.o xenbus.o 3 + xenfs-y = super.o xenbus.o privcmd.o 4 + xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
+404
drivers/xen/xenfs/privcmd.c
··· 1 + /****************************************************************************** 2 + * privcmd.c 3 + * 4 + * Interface to privileged domain-0 commands. 5 + * 6 + * Copyright (c) 2002-2004, K A Fraser, B Dragovic 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/sched.h> 11 + #include <linux/slab.h> 12 + #include <linux/string.h> 13 + #include <linux/errno.h> 14 + #include <linux/mm.h> 15 + #include <linux/mman.h> 16 + #include <linux/uaccess.h> 17 + #include <linux/swap.h> 18 + #include <linux/smp_lock.h> 19 + #include <linux/highmem.h> 20 + #include <linux/pagemap.h> 21 + #include <linux/seq_file.h> 22 + 23 + #include <asm/pgalloc.h> 24 + #include <asm/pgtable.h> 25 + #include <asm/tlb.h> 26 + #include <asm/xen/hypervisor.h> 27 + #include <asm/xen/hypercall.h> 28 + 29 + #include <xen/xen.h> 30 + #include <xen/privcmd.h> 31 + #include <xen/interface/xen.h> 32 + #include <xen/features.h> 33 + #include <xen/page.h> 34 + #include <xen/xen-ops.h> 35 + 36 + #ifndef HAVE_ARCH_PRIVCMD_MMAP 37 + static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); 38 + #endif 39 + 40 + static long privcmd_ioctl_hypercall(void __user *udata) 41 + { 42 + struct privcmd_hypercall hypercall; 43 + long ret; 44 + 45 + if (copy_from_user(&hypercall, udata, sizeof(hypercall))) 46 + return -EFAULT; 47 + 48 + ret = privcmd_call(hypercall.op, 49 + hypercall.arg[0], hypercall.arg[1], 50 + hypercall.arg[2], hypercall.arg[3], 51 + hypercall.arg[4]); 52 + 53 + return ret; 54 + } 55 + 56 + static void free_page_list(struct list_head *pages) 57 + { 58 + struct page *p, *n; 59 + 60 + list_for_each_entry_safe(p, n, pages, lru) 61 + __free_page(p); 62 + 63 + INIT_LIST_HEAD(pages); 64 + } 65 + 66 + /* 67 + * Given an array of items in userspace, return a list of pages 68 + * containing the data. If copying fails, either because of memory 69 + * allocation failure or a problem reading user memory, return an 70 + * error code; its up to the caller to dispose of any partial list. 71 + */ 72 + static int gather_array(struct list_head *pagelist, 73 + unsigned nelem, size_t size, 74 + void __user *data) 75 + { 76 + unsigned pageidx; 77 + void *pagedata; 78 + int ret; 79 + 80 + if (size > PAGE_SIZE) 81 + return 0; 82 + 83 + pageidx = PAGE_SIZE; 84 + pagedata = NULL; /* quiet, gcc */ 85 + while (nelem--) { 86 + if (pageidx > PAGE_SIZE-size) { 87 + struct page *page = alloc_page(GFP_KERNEL); 88 + 89 + ret = -ENOMEM; 90 + if (page == NULL) 91 + goto fail; 92 + 93 + pagedata = page_address(page); 94 + 95 + list_add_tail(&page->lru, pagelist); 96 + pageidx = 0; 97 + } 98 + 99 + ret = -EFAULT; 100 + if (copy_from_user(pagedata + pageidx, data, size)) 101 + goto fail; 102 + 103 + data += size; 104 + pageidx += size; 105 + } 106 + 107 + ret = 0; 108 + 109 + fail: 110 + return ret; 111 + } 112 + 113 + /* 114 + * Call function "fn" on each element of the array fragmented 115 + * over a list of pages. 
116 + */ 117 + static int traverse_pages(unsigned nelem, size_t size, 118 + struct list_head *pos, 119 + int (*fn)(void *data, void *state), 120 + void *state) 121 + { 122 + void *pagedata; 123 + unsigned pageidx; 124 + int ret = 0; 125 + 126 + BUG_ON(size > PAGE_SIZE); 127 + 128 + pageidx = PAGE_SIZE; 129 + pagedata = NULL; /* hush, gcc */ 130 + 131 + while (nelem--) { 132 + if (pageidx > PAGE_SIZE-size) { 133 + struct page *page; 134 + pos = pos->next; 135 + page = list_entry(pos, struct page, lru); 136 + pagedata = page_address(page); 137 + pageidx = 0; 138 + } 139 + 140 + ret = (*fn)(pagedata + pageidx, state); 141 + if (ret) 142 + break; 143 + pageidx += size; 144 + } 145 + 146 + return ret; 147 + } 148 + 149 + struct mmap_mfn_state { 150 + unsigned long va; 151 + struct vm_area_struct *vma; 152 + domid_t domain; 153 + }; 154 + 155 + static int mmap_mfn_range(void *data, void *state) 156 + { 157 + struct privcmd_mmap_entry *msg = data; 158 + struct mmap_mfn_state *st = state; 159 + struct vm_area_struct *vma = st->vma; 160 + int rc; 161 + 162 + /* Do not allow range to wrap the address space. */ 163 + if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || 164 + ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) 165 + return -EINVAL; 166 + 167 + /* Range chunks must be contiguous in va space. */ 168 + if ((msg->va != st->va) || 169 + ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) 170 + return -EINVAL; 171 + 172 + rc = xen_remap_domain_mfn_range(vma, 173 + msg->va & PAGE_MASK, 174 + msg->mfn, msg->npages, 175 + vma->vm_page_prot, 176 + st->domain); 177 + if (rc < 0) 178 + return rc; 179 + 180 + st->va += msg->npages << PAGE_SHIFT; 181 + 182 + return 0; 183 + } 184 + 185 + static long privcmd_ioctl_mmap(void __user *udata) 186 + { 187 + struct privcmd_mmap mmapcmd; 188 + struct mm_struct *mm = current->mm; 189 + struct vm_area_struct *vma; 190 + int rc; 191 + LIST_HEAD(pagelist); 192 + struct mmap_mfn_state state; 193 + 194 + if (!xen_initial_domain()) 195 + return -EPERM; 196 + 197 + if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd))) 198 + return -EFAULT; 199 + 200 + rc = gather_array(&pagelist, 201 + mmapcmd.num, sizeof(struct privcmd_mmap_entry), 202 + mmapcmd.entry); 203 + 204 + if (rc || list_empty(&pagelist)) 205 + goto out; 206 + 207 + down_write(&mm->mmap_sem); 208 + 209 + { 210 + struct page *page = list_first_entry(&pagelist, 211 + struct page, lru); 212 + struct privcmd_mmap_entry *msg = page_address(page); 213 + 214 + vma = find_vma(mm, msg->va); 215 + rc = -EINVAL; 216 + 217 + if (!vma || (msg->va != vma->vm_start) || 218 + !privcmd_enforce_singleshot_mapping(vma)) 219 + goto out_up; 220 + } 221 + 222 + state.va = vma->vm_start; 223 + state.vma = vma; 224 + state.domain = mmapcmd.dom; 225 + 226 + rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), 227 + &pagelist, 228 + mmap_mfn_range, &state); 229 + 230 + 231 + out_up: 232 + up_write(&mm->mmap_sem); 233 + 234 + out: 235 + free_page_list(&pagelist); 236 + 237 + return rc; 238 + } 239 + 240 + struct mmap_batch_state { 241 + domid_t domain; 242 + unsigned long va; 243 + struct vm_area_struct *vma; 244 + int err; 245 + 246 + xen_pfn_t __user *user; 247 + }; 248 + 249 + static int mmap_batch_fn(void *data, void *state) 250 + { 251 + xen_pfn_t *mfnp = data; 252 + struct mmap_batch_state *st = state; 253 + 254 + if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, 255 + st->vma->vm_page_prot, st->domain) < 0) { 256 + *mfnp |= 0xf0000000U; 257 + st->err++; 258 + } 259 + st->va += PAGE_SIZE; 
260 + 261 + return 0; 262 + } 263 + 264 + static int mmap_return_errors(void *data, void *state) 265 + { 266 + xen_pfn_t *mfnp = data; 267 + struct mmap_batch_state *st = state; 268 + 269 + put_user(*mfnp, st->user++); 270 + 271 + return 0; 272 + } 273 + 274 + static struct vm_operations_struct privcmd_vm_ops; 275 + 276 + static long privcmd_ioctl_mmap_batch(void __user *udata) 277 + { 278 + int ret; 279 + struct privcmd_mmapbatch m; 280 + struct mm_struct *mm = current->mm; 281 + struct vm_area_struct *vma; 282 + unsigned long nr_pages; 283 + LIST_HEAD(pagelist); 284 + struct mmap_batch_state state; 285 + 286 + if (!xen_initial_domain()) 287 + return -EPERM; 288 + 289 + if (copy_from_user(&m, udata, sizeof(m))) 290 + return -EFAULT; 291 + 292 + nr_pages = m.num; 293 + if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT))) 294 + return -EINVAL; 295 + 296 + ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), 297 + m.arr); 298 + 299 + if (ret || list_empty(&pagelist)) 300 + goto out; 301 + 302 + down_write(&mm->mmap_sem); 303 + 304 + vma = find_vma(mm, m.addr); 305 + ret = -EINVAL; 306 + if (!vma || 307 + vma->vm_ops != &privcmd_vm_ops || 308 + (m.addr != vma->vm_start) || 309 + ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) || 310 + !privcmd_enforce_singleshot_mapping(vma)) { 311 + up_write(&mm->mmap_sem); 312 + goto out; 313 + } 314 + 315 + state.domain = m.dom; 316 + state.vma = vma; 317 + state.va = m.addr; 318 + state.err = 0; 319 + 320 + ret = traverse_pages(m.num, sizeof(xen_pfn_t), 321 + &pagelist, mmap_batch_fn, &state); 322 + 323 + up_write(&mm->mmap_sem); 324 + 325 + if (state.err > 0) { 326 + ret = 0; 327 + 328 + state.user = m.arr; 329 + traverse_pages(m.num, sizeof(xen_pfn_t), 330 + &pagelist, 331 + mmap_return_errors, &state); 332 + } 333 + 334 + out: 335 + free_page_list(&pagelist); 336 + 337 + return ret; 338 + } 339 + 340 + static long privcmd_ioctl(struct file *file, 341 + unsigned int cmd, unsigned long data) 342 + { 343 + int ret = -ENOSYS; 344 + void __user *udata = (void __user *) data; 345 + 346 + switch (cmd) { 347 + case IOCTL_PRIVCMD_HYPERCALL: 348 + ret = privcmd_ioctl_hypercall(udata); 349 + break; 350 + 351 + case IOCTL_PRIVCMD_MMAP: 352 + ret = privcmd_ioctl_mmap(udata); 353 + break; 354 + 355 + case IOCTL_PRIVCMD_MMAPBATCH: 356 + ret = privcmd_ioctl_mmap_batch(udata); 357 + break; 358 + 359 + default: 360 + ret = -EINVAL; 361 + break; 362 + } 363 + 364 + return ret; 365 + } 366 + 367 + #ifndef HAVE_ARCH_PRIVCMD_MMAP 368 + static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 369 + { 370 + printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", 371 + vma, vma->vm_start, vma->vm_end, 372 + vmf->pgoff, vmf->virtual_address); 373 + 374 + return VM_FAULT_SIGBUS; 375 + } 376 + 377 + static struct vm_operations_struct privcmd_vm_ops = { 378 + .fault = privcmd_fault 379 + }; 380 + 381 + static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) 382 + { 383 + /* Unsupported for auto-translate guests. */ 384 + if (xen_feature(XENFEAT_auto_translated_physmap)) 385 + return -ENOSYS; 386 + 387 + /* DONTCOPY is essential for Xen as copy_page_range is broken. 
*/ 388 + vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY; 389 + vma->vm_ops = &privcmd_vm_ops; 390 + vma->vm_private_data = NULL; 391 + 392 + return 0; 393 + } 394 + 395 + static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma) 396 + { 397 + return (xchg(&vma->vm_private_data, (void *)1) == NULL); 398 + } 399 + #endif 400 + 401 + const struct file_operations privcmd_file_ops = { 402 + .unlocked_ioctl = privcmd_ioctl, 403 + .mmap = privcmd_mmap, 404 + };
+90 -5
drivers/xen/xenfs/super.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/fs.h> 14 14 #include <linux/magic.h> 15 + #include <linux/mm.h> 16 + #include <linux/backing-dev.h> 15 17 16 18 #include <xen/xen.h> 17 19 ··· 23 21 24 22 MODULE_DESCRIPTION("Xen filesystem"); 25 23 MODULE_LICENSE("GPL"); 24 + 25 + static int xenfs_set_page_dirty(struct page *page) 26 + { 27 + return !TestSetPageDirty(page); 28 + } 29 + 30 + static const struct address_space_operations xenfs_aops = { 31 + .set_page_dirty = xenfs_set_page_dirty, 32 + }; 33 + 34 + static struct backing_dev_info xenfs_backing_dev_info = { 35 + .ra_pages = 0, /* No readahead */ 36 + .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 37 + }; 38 + 39 + static struct inode *xenfs_make_inode(struct super_block *sb, int mode) 40 + { 41 + struct inode *ret = new_inode(sb); 42 + 43 + if (ret) { 44 + ret->i_mode = mode; 45 + ret->i_mapping->a_ops = &xenfs_aops; 46 + ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info; 47 + ret->i_uid = ret->i_gid = 0; 48 + ret->i_blocks = 0; 49 + ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; 50 + } 51 + return ret; 52 + } 53 + 54 + static struct dentry *xenfs_create_file(struct super_block *sb, 55 + struct dentry *parent, 56 + const char *name, 57 + const struct file_operations *fops, 58 + void *data, 59 + int mode) 60 + { 61 + struct dentry *dentry; 62 + struct inode *inode; 63 + 64 + dentry = d_alloc_name(parent, name); 65 + if (!dentry) 66 + return NULL; 67 + 68 + inode = xenfs_make_inode(sb, S_IFREG | mode); 69 + if (!inode) { 70 + dput(dentry); 71 + return NULL; 72 + } 73 + 74 + inode->i_fop = fops; 75 + inode->i_private = data; 76 + 77 + d_add(dentry, inode); 78 + return dentry; 79 + } 26 80 27 81 static ssize_t capabilities_read(struct file *file, char __user *buf, 28 82 size_t size, loff_t *off) ··· 102 44 [1] = {}, 103 45 { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR }, 104 46 { "capabilities", &capabilities_file_ops, S_IRUGO }, 47 + { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR }, 105 48 {""}, 106 49 }; 50 + int rc; 107 51 108 - return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files); 52 + rc = simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files); 53 + if (rc < 0) 54 + return rc; 55 + 56 + if (xen_initial_domain()) { 57 + xenfs_create_file(sb, sb->s_root, "xsd_kva", 58 + &xsd_kva_file_ops, NULL, S_IRUSR|S_IWUSR); 59 + xenfs_create_file(sb, sb->s_root, "xsd_port", 60 + &xsd_port_file_ops, NULL, S_IRUSR|S_IWUSR); 61 + } 62 + 63 + return rc; 109 64 } 110 65 111 66 static int xenfs_get_sb(struct file_system_type *fs_type, ··· 137 66 138 67 static int __init xenfs_init(void) 139 68 { 140 - if (xen_domain()) 141 - return register_filesystem(&xenfs_type); 69 + int err; 70 + if (!xen_domain()) { 71 + printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n"); 72 + return 0; 73 + } 142 74 143 - printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n"); 144 - return 0; 75 + err = register_filesystem(&xenfs_type); 76 + if (err) { 77 + printk(KERN_ERR "xenfs: Unable to register filesystem!\n"); 78 + goto out; 79 + } 80 + 81 + err = bdi_init(&xenfs_backing_dev_info); 82 + if (err) 83 + unregister_filesystem(&xenfs_type); 84 + 85 + out: 86 + 87 + return err; 145 88 } 146 89 147 90 static void __exit xenfs_exit(void)
+3
drivers/xen/xenfs/xenfs.h
··· 2 2 #define _XENFS_XENBUS_H 3 3 4 4 extern const struct file_operations xenbus_file_ops; 5 + extern const struct file_operations privcmd_file_ops; 6 + extern const struct file_operations xsd_kva_file_ops; 7 + extern const struct file_operations xsd_port_file_ops; 5 8 6 9 #endif /* _XENFS_XENBUS_H */
+68
drivers/xen/xenfs/xenstored.c
··· 1 + #include <linux/slab.h> 2 + #include <linux/types.h> 3 + #include <linux/mm.h> 4 + #include <linux/fs.h> 5 + 6 + #include <xen/page.h> 7 + 8 + #include "xenfs.h" 9 + #include "../xenbus/xenbus_comms.h" 10 + 11 + static ssize_t xsd_read(struct file *file, char __user *buf, 12 + size_t size, loff_t *off) 13 + { 14 + const char *str = (const char *)file->private_data; 15 + return simple_read_from_buffer(buf, size, off, str, strlen(str)); 16 + } 17 + 18 + static int xsd_release(struct inode *inode, struct file *file) 19 + { 20 + kfree(file->private_data); 21 + return 0; 22 + } 23 + 24 + static int xsd_kva_open(struct inode *inode, struct file *file) 25 + { 26 + file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p", 27 + xen_store_interface); 28 + if (!file->private_data) 29 + return -ENOMEM; 30 + return 0; 31 + } 32 + 33 + static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) 34 + { 35 + size_t size = vma->vm_end - vma->vm_start; 36 + 37 + if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) 38 + return -EINVAL; 39 + 40 + if (remap_pfn_range(vma, vma->vm_start, 41 + virt_to_pfn(xen_store_interface), 42 + size, vma->vm_page_prot)) 43 + return -EAGAIN; 44 + 45 + return 0; 46 + } 47 + 48 + const struct file_operations xsd_kva_file_ops = { 49 + .open = xsd_kva_open, 50 + .mmap = xsd_kva_mmap, 51 + .read = xsd_read, 52 + .release = xsd_release, 53 + }; 54 + 55 + static int xsd_port_open(struct inode *inode, struct file *file) 56 + { 57 + file->private_data = (void *)kasprintf(GFP_KERNEL, "%d", 58 + xen_store_evtchn); 59 + if (!file->private_data) 60 + return -ENOMEM; 61 + return 0; 62 + } 63 + 64 + const struct file_operations xsd_port_file_ops = { 65 + .open = xsd_port_open, 66 + .read = xsd_read, 67 + .release = xsd_release, 68 + };
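
The new xenstored.c backs the /proc/xen/xsd_kva and /proc/xen/xsd_port nodes mentioned in the commit message: the former mmaps the single xenstore interface page, the latter reads back the store event channel as text. A hedged sketch of how a userspace xenstored might consume them (the /proc/xen mount point is assumed and error handling is kept minimal):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* xsd_port exposes the store event channel as a decimal string. */
        unsigned int port;
        FILE *f = fopen("/proc/xen/xsd_port", "r");
        if (!f || fscanf(f, "%u", &port) != 1) {
            perror("xsd_port");
            return 1;
        }
        fclose(f);

        /* xsd_kva's mmap handler remaps the single xenstore interface page;
         * sizes larger than one page are rejected with -EINVAL. */
        int fd = open("/proc/xen/xsd_kva", O_RDWR);
        if (fd < 0) {
            perror("xsd_kva");
            return 1;
        }
        void *ring = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ring == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        printf("xenstore ring mapped at %p, event channel %u\n", ring, port);
        /* A real xenstored would now serve requests over this ring. */
        munmap(ring, 4096);
        close(fd);
        return 0;
    }
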
+1
include/xen/Kbuild
··· 1 1 header-y += evtchn.h 2 + header-y += privcmd.h
+29
include/xen/interface/memory.h
··· 186 186 }; 187 187 DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list); 188 188 189 + /* 190 + * Returns the pseudo-physical memory map as it was when the domain 191 + * was started (specified by XENMEM_set_memory_map). 192 + * arg == addr of struct xen_memory_map. 193 + */ 194 + #define XENMEM_memory_map 9 195 + struct xen_memory_map { 196 + /* 197 + * On call the number of entries which can be stored in buffer. On 198 + * return the number of entries which have been stored in 199 + * buffer. 200 + */ 201 + unsigned int nr_entries; 202 + 203 + /* 204 + * Entries in the buffer are in the same format as returned by the 205 + * BIOS INT 0x15 EAX=0xE820 call. 206 + */ 207 + GUEST_HANDLE(void) buffer; 208 + }; 209 + DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map); 210 + 211 + /* 212 + * Returns the real physical memory map. Passes the same structure as 213 + * XENMEM_memory_map. 214 + * arg == addr of struct xen_memory_map. 215 + */ 216 + #define XENMEM_machine_memory_map 10 217 + 189 218 190 219 /* 191 220 * Prevent the balloon driver from changing the memory reservation
+80
include/xen/privcmd.h
··· 1 + /****************************************************************************** 2 + * privcmd.h 3 + * 4 + * Interface to /proc/xen/privcmd. 5 + * 6 + * Copyright (c) 2003-2005, K A Fraser 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License version 2 10 + * as published by the Free Software Foundation; or, when distributed 11 + * separately from the Linux kernel or incorporated into other 12 + * software packages, subject to the following license: 13 + * 14 + * Permission is hereby granted, free of charge, to any person obtaining a copy 15 + * of this source file (the "Software"), to deal in the Software without 16 + * restriction, including without limitation the rights to use, copy, modify, 17 + * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 + * and to permit persons to whom the Software is furnished to do so, subject to 19 + * the following conditions: 20 + * 21 + * The above copyright notice and this permission notice shall be included in 22 + * all copies or substantial portions of the Software. 23 + * 24 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 + * IN THE SOFTWARE. 31 + */ 32 + 33 + #ifndef __LINUX_PUBLIC_PRIVCMD_H__ 34 + #define __LINUX_PUBLIC_PRIVCMD_H__ 35 + 36 + #include <linux/types.h> 37 + 38 + typedef unsigned long xen_pfn_t; 39 + 40 + #ifndef __user 41 + #define __user 42 + #endif 43 + 44 + struct privcmd_hypercall { 45 + __u64 op; 46 + __u64 arg[5]; 47 + }; 48 + 49 + struct privcmd_mmap_entry { 50 + __u64 va; 51 + __u64 mfn; 52 + __u64 npages; 53 + }; 54 + 55 + struct privcmd_mmap { 56 + int num; 57 + domid_t dom; /* target domain */ 58 + struct privcmd_mmap_entry __user *entry; 59 + }; 60 + 61 + struct privcmd_mmapbatch { 62 + int num; /* number of pages to populate */ 63 + domid_t dom; /* target domain */ 64 + __u64 addr; /* virtual address */ 65 + xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */ 66 + }; 67 + 68 + /* 69 + * @cmd: IOCTL_PRIVCMD_HYPERCALL 70 + * @arg: &privcmd_hypercall_t 71 + * Return: Value returned from execution of the specified hypercall. 72 + */ 73 + #define IOCTL_PRIVCMD_HYPERCALL \ 74 + _IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall)) 75 + #define IOCTL_PRIVCMD_MMAP \ 76 + _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap)) 77 + #define IOCTL_PRIVCMD_MMAPBATCH \ 78 + _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch)) 79 + 80 + #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
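
Besides the plain hypercall ioctl, the header above defines IOCTL_PRIVCMD_MMAP and IOCTL_PRIVCMD_MMAPBATCH, which privcmd.c routes into the new xen_remap_domain_mfn_range(). Per the MMAPBATCH error-handling fix in this series, a frame that fails to map gets its top nibble set in the returned array. Below is a sketch of mapping a single foreign frame from a dom0 tool; the target domid, the mfn, and the /proc/xen/privcmd path are placeholders, and domid_t is assumed to come from the Xen interface headers.

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <xen/privcmd.h>   /* struct privcmd_mmapbatch, IOCTL_PRIVCMD_MMAPBATCH */

    int main(void)
    {
        int fd = open("/proc/xen/privcmd", O_RDWR);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Reserve a VA range backed by the privcmd vma; its fault handler
         * SIGBUSes until the ioctl below populates the range. */
        void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        xen_pfn_t mfn = 0x1234;          /* placeholder foreign frame number */
        struct privcmd_mmapbatch batch = {
            .num  = 1,
            .dom  = 7,                   /* placeholder target domid */
            .addr = (uintptr_t)addr,     /* must cover the whole vma */
            .arr  = &mfn,
        };

        if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &batch) < 0)
            perror("IOCTL_PRIVCMD_MMAPBATCH");
        else if (mfn & 0xf0000000U)      /* per-frame error flag written back */
            fprintf(stderr, "frame failed to map\n");
        else
            printf("foreign frame mapped at %p\n", addr);

        munmap(addr, 4096);
        close(fd);
        return 0;
    }
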
+5
include/xen/xen-ops.h
··· 23 23 24 24 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order); 25 25 26 + int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 27 + unsigned long addr, 28 + unsigned long mfn, int nr, 29 + pgprot_t prot, unsigned domid); 30 + 26 31 #endif /* INCLUDE_XEN_OPS_H */