Merge branch 'bugfix' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen

* 'bugfix' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen:
xen: try harder to balloon up under memory pressure.
Xen balloon: fix totalram_pages counting.
xen: explicitly create/destroy stop_machine workqueues outside suspend/resume region.
xen: improve error handling in do_suspend.
xen: don't leak IRQs over suspend/resume.
xen: call clock resume notifier on all CPUs
xen: use iret for return from 64b kernel to 32b usermode
xen: don't call dpm_resume_noirq() with interrupts disabled.
xen: register runstate info for boot CPU early
xen: register runstate on secondary CPUs
xen: register timer interrupt with IRQF_TIMER
xen: correctly restore pfn_to_mfn_list_list after resume
xen: restore runstate_info even if !have_vcpu_info_placement
xen: re-register runstate area earlier on resume.
xen: wait up to 5 minutes for device connection
xen: improvement to wait_for_devices()
xen: fix is_disconnected_device/exists_disconnected_device
xen/xenbus: make DEVICE_ATTR()s static

+109 -81
+14 -13
arch/x86/xen/enlighten.c
··· 138 */ 139 void xen_vcpu_restore(void) 140 { 141 - if (have_vcpu_info_placement) { 142 - int cpu; 143 144 - for_each_online_cpu(cpu) { 145 - bool other_cpu = (cpu != smp_processor_id()); 146 147 - if (other_cpu && 148 - HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) 149 - BUG(); 150 151 xen_vcpu_setup(cpu); 152 153 - if (other_cpu && 154 - HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) 155 - BUG(); 156 - } 157 - 158 - BUG_ON(!have_vcpu_info_placement); 159 } 160 } 161 ··· 1178 } 1179 1180 xen_raw_console_write("about to get started...\n"); 1181 1182 /* Start the world */ 1183 #ifdef CONFIG_X86_32
··· 138 */ 139 void xen_vcpu_restore(void) 140 { 141 + int cpu; 142 143 + for_each_online_cpu(cpu) { 144 + bool other_cpu = (cpu != smp_processor_id()); 145 146 + if (other_cpu && 147 + HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) 148 + BUG(); 149 150 + xen_setup_runstate_info(cpu); 151 + 152 + if (have_vcpu_info_placement) 153 xen_vcpu_setup(cpu); 154 155 + if (other_cpu && 156 + HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) 157 + BUG(); 158 } 159 } 160 ··· 1179 } 1180 1181 xen_raw_console_write("about to get started...\n"); 1182 + 1183 + xen_setup_runstate_info(0); 1184 1185 /* Start the world */ 1186 #ifdef CONFIG_X86_32
+1 -1
arch/x86/xen/mmu.c
··· 185 } 186 187 /* Build the parallel p2m_top_mfn structures */ 188 - static void __init xen_build_mfn_list_list(void) 189 { 190 unsigned pfn, idx; 191
··· 185 } 186 187 /* Build the parallel p2m_top_mfn structures */ 188 + void xen_build_mfn_list_list(void) 189 { 190 unsigned pfn, idx; 191
+1
arch/x86/xen/smp.c
··· 295 (unsigned long)task_stack_page(idle) - 296 KERNEL_STACK_OFFSET + THREAD_SIZE; 297 #endif 298 xen_setup_timer(cpu); 299 xen_init_lock_cpu(cpu); 300
··· 295 (unsigned long)task_stack_page(idle) - 296 KERNEL_STACK_OFFSET + THREAD_SIZE; 297 #endif 298 + xen_setup_runstate_info(cpu); 299 xen_setup_timer(cpu); 300 xen_init_lock_cpu(cpu); 301
+16 -1
arch/x86/xen/suspend.c
··· 1 #include <linux/types.h> 2 3 #include <xen/interface/xen.h> 4 #include <xen/grant_table.h> ··· 28 29 void xen_post_suspend(int suspend_cancelled) 30 { 31 xen_setup_shared_info(); 32 33 if (suspend_cancelled) { ··· 47 48 } 49 50 void xen_arch_resume(void) 51 { 52 - /* nothing */ 53 }
··· 1 #include <linux/types.h> 2 + #include <linux/clockchips.h> 3 4 #include <xen/interface/xen.h> 5 #include <xen/grant_table.h> ··· 27 28 void xen_post_suspend(int suspend_cancelled) 29 { 30 + xen_build_mfn_list_list(); 31 + 32 xen_setup_shared_info(); 33 34 if (suspend_cancelled) { ··· 44 45 } 46 47 + static void xen_vcpu_notify_restore(void *data) 48 + { 49 + unsigned long reason = (unsigned long)data; 50 + 51 + /* Boot processor notified via generic timekeeping_resume() */ 52 + if ( smp_processor_id() == 0) 53 + return; 54 + 55 + clockevents_notify(reason, NULL); 56 + } 57 + 58 void xen_arch_resume(void) 59 { 60 + smp_call_function(xen_vcpu_notify_restore, 61 + (void *)CLOCK_EVT_NOTIFY_RESUME, 1); 62 }
+3 -4
arch/x86/xen/time.c
··· 100 return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; 101 } 102 103 - static void setup_runstate_info(int cpu) 104 { 105 struct vcpu_register_runstate_memory_area area; 106 ··· 434 name = "<timer kasprintf failed>"; 435 436 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, 437 - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, 438 name, NULL); 439 440 evt = &per_cpu(xen_clock_events, cpu); ··· 442 443 evt->cpumask = cpumask_of(cpu); 444 evt->irq = irq; 445 - 446 - setup_runstate_info(cpu); 447 } 448 449 void xen_teardown_timer(int cpu) ··· 492 493 setup_force_cpu_cap(X86_FEATURE_TSC); 494 495 xen_setup_timer(cpu); 496 xen_setup_cpu_clockevents(); 497 }
··· 100 return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; 101 } 102 103 + void xen_setup_runstate_info(int cpu) 104 { 105 struct vcpu_register_runstate_memory_area area; 106 ··· 434 name = "<timer kasprintf failed>"; 435 436 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, 437 + IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, 438 name, NULL); 439 440 evt = &per_cpu(xen_clock_events, cpu); ··· 442 443 evt->cpumask = cpumask_of(cpu); 444 evt->irq = irq; 445 } 446 447 void xen_teardown_timer(int cpu) ··· 494 495 setup_force_cpu_cap(X86_FEATURE_TSC); 496 497 + xen_setup_runstate_info(cpu); 498 xen_setup_timer(cpu); 499 xen_setup_cpu_clockevents(); 500 }
+2 -2
arch/x86/xen/xen-asm_64.S
··· 96 pushq $__USER32_CS 97 pushq %rcx 98 99 - pushq $VGCF_in_syscall 100 1: jmp hypercall_iret 101 ENDPATCH(xen_sysret32) 102 RELOC(xen_sysret32, 1b+1) ··· 151 ENTRY(xen_sysenter_target) 152 lea 16(%rsp), %rsp /* strip %rcx, %r11 */ 153 mov $-ENOSYS, %rax 154 - pushq $VGCF_in_syscall 155 jmp hypercall_iret 156 ENDPROC(xen_syscall32_target) 157 ENDPROC(xen_sysenter_target)
··· 96 pushq $__USER32_CS 97 pushq %rcx 98 99 + pushq $0 100 1: jmp hypercall_iret 101 ENDPATCH(xen_sysret32) 102 RELOC(xen_sysret32, 1b+1) ··· 151 ENTRY(xen_sysenter_target) 152 lea 16(%rsp), %rsp /* strip %rcx, %r11 */ 153 mov $-ENOSYS, %rax 154 + pushq $0 155 jmp hypercall_iret 156 ENDPROC(xen_syscall32_target) 157 ENDPROC(xen_sysenter_target)
+2
arch/x86/xen/xen-ops.h
··· 25 26 void xen_setup_mfn_list_list(void); 27 void xen_setup_shared_info(void); 28 void xen_setup_machphys_mapping(void); 29 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); 30 void xen_ident_map_ISA(void); ··· 42 43 void xen_init_irq_ops(void); 44 void xen_setup_timer(int cpu); 45 void xen_teardown_timer(int cpu); 46 cycle_t xen_clocksource_read(void); 47 void xen_setup_cpu_clockevents(void);
··· 25 26 void xen_setup_mfn_list_list(void); 27 void xen_setup_shared_info(void); 28 + void xen_build_mfn_list_list(void); 29 void xen_setup_machphys_mapping(void); 30 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); 31 void xen_ident_map_ISA(void); ··· 41 42 void xen_init_irq_ops(void); 43 void xen_setup_timer(int cpu); 44 + void xen_setup_runstate_info(int cpu); 45 void xen_teardown_timer(int cpu); 46 cycle_t xen_clocksource_read(void); 47 void xen_setup_cpu_clockevents(void);
+9 -29
drivers/xen/balloon.c
··· 66 /* We aim for 'current allocation' == 'target allocation'. */ 67 unsigned long current_pages; 68 unsigned long target_pages; 69 - /* We may hit the hard limit in Xen. If we do then we remember it. */ 70 - unsigned long hard_limit; 71 /* 72 * Drivers may alter the memory reservation independently, but they 73 * must inform the balloon driver so we avoid hitting the hard limit. ··· 134 list_add(&page->lru, &ballooned_pages); 135 balloon_stats.balloon_low++; 136 } 137 } 138 139 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ ··· 155 } 156 else 157 balloon_stats.balloon_low--; 158 159 return page; 160 } ··· 183 184 static unsigned long current_target(void) 185 { 186 - unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit); 187 188 target = min(target, 189 balloon_stats.current_pages + ··· 219 set_xen_guest_handle(reservation.extent_start, frame_list); 220 reservation.nr_extents = nr_pages; 221 rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); 222 - if (rc < nr_pages) { 223 - if (rc > 0) { 224 - int ret; 225 - 226 - /* We hit the Xen hard limit: reprobe. 
*/ 227 - reservation.nr_extents = rc; 228 - ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, 229 - &reservation); 230 - BUG_ON(ret != rc); 231 - } 232 - if (rc >= 0) 233 - balloon_stats.hard_limit = (balloon_stats.current_pages + rc - 234 - balloon_stats.driver_pages); 235 goto out; 236 - } 237 238 - for (i = 0; i < nr_pages; i++) { 239 page = balloon_retrieve(); 240 BUG_ON(page == NULL); 241 ··· 248 __free_page(page); 249 } 250 251 - balloon_stats.current_pages += nr_pages; 252 - totalram_pages = balloon_stats.current_pages; 253 254 out: 255 spin_unlock_irqrestore(&balloon_lock, flags); 256 257 - return 0; 258 } 259 260 static int decrease_reservation(unsigned long nr_pages) ··· 311 BUG_ON(ret != nr_pages); 312 313 balloon_stats.current_pages -= nr_pages; 314 - totalram_pages = balloon_stats.current_pages; 315 316 spin_unlock_irqrestore(&balloon_lock, flags); 317 ··· 354 static void balloon_set_new_target(unsigned long target) 355 { 356 /* No need for lock. Not read-modify-write updates. */ 357 - balloon_stats.hard_limit = ~0UL; 358 balloon_stats.target_pages = target; 359 schedule_work(&balloon_worker); 360 } ··· 408 pr_info("xen_balloon: Initialising balloon driver.\n"); 409 410 balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn); 411 - totalram_pages = balloon_stats.current_pages; 412 balloon_stats.target_pages = balloon_stats.current_pages; 413 balloon_stats.balloon_low = 0; 414 balloon_stats.balloon_high = 0; 415 balloon_stats.driver_pages = 0UL; 416 - balloon_stats.hard_limit = ~0UL; 417 418 init_timer(&balloon_timer); 419 balloon_timer.data = 0; ··· 456 BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); 457 BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); 458 BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); 459 - BALLOON_SHOW(hard_limit_kb, 460 - (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n", 461 - (balloon_stats.hard_limit!=~0UL) ? 
PAGES2KB(balloon_stats.hard_limit) : 0); 462 BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages)); 463 464 static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, ··· 525 &attr_current_kb.attr, 526 &attr_low_kb.attr, 527 &attr_high_kb.attr, 528 - &attr_hard_limit_kb.attr, 529 &attr_driver_kb.attr, 530 NULL 531 };
··· 66 /* We aim for 'current allocation' == 'target allocation'. */ 67 unsigned long current_pages; 68 unsigned long target_pages; 69 /* 70 * Drivers may alter the memory reservation independently, but they 71 * must inform the balloon driver so we avoid hitting the hard limit. ··· 136 list_add(&page->lru, &ballooned_pages); 137 balloon_stats.balloon_low++; 138 } 139 + 140 + totalram_pages--; 141 } 142 143 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ ··· 155 } 156 else 157 balloon_stats.balloon_low--; 158 + 159 + totalram_pages++; 160 161 return page; 162 } ··· 181 182 static unsigned long current_target(void) 183 { 184 + unsigned long target = balloon_stats.target_pages; 185 186 target = min(target, 187 balloon_stats.current_pages + ··· 217 set_xen_guest_handle(reservation.extent_start, frame_list); 218 reservation.nr_extents = nr_pages; 219 rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); 220 + if (rc < 0) 221 goto out; 222 223 + for (i = 0; i < rc; i++) { 224 page = balloon_retrieve(); 225 BUG_ON(page == NULL); 226 ··· 259 __free_page(page); 260 } 261 262 + balloon_stats.current_pages += rc; 263 264 out: 265 spin_unlock_irqrestore(&balloon_lock, flags); 266 267 + return rc < 0 ? rc : rc != nr_pages; 268 } 269 270 static int decrease_reservation(unsigned long nr_pages) ··· 323 BUG_ON(ret != nr_pages); 324 325 balloon_stats.current_pages -= nr_pages; 326 327 spin_unlock_irqrestore(&balloon_lock, flags); 328 ··· 367 static void balloon_set_new_target(unsigned long target) 368 { 369 /* No need for lock. Not read-modify-write updates. 
*/ 370 balloon_stats.target_pages = target; 371 schedule_work(&balloon_worker); 372 } ··· 422 pr_info("xen_balloon: Initialising balloon driver.\n"); 423 424 balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn); 425 balloon_stats.target_pages = balloon_stats.current_pages; 426 balloon_stats.balloon_low = 0; 427 balloon_stats.balloon_high = 0; 428 balloon_stats.driver_pages = 0UL; 429 430 init_timer(&balloon_timer); 431 balloon_timer.data = 0; ··· 472 BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); 473 BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); 474 BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); 475 BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages)); 476 477 static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, ··· 544 &attr_current_kb.attr, 545 &attr_low_kb.attr, 546 &attr_high_kb.attr, 547 &attr_driver_kb.attr, 548 NULL 549 };
+3
drivers/xen/events.c
··· 474 bind_evtchn_to_cpu(evtchn, 0); 475 476 evtchn_to_irq[evtchn] = -1; 477 irq_info[irq] = mk_unbound_info(); 478 479 dynamic_irq_cleanup(irq);
··· 474 bind_evtchn_to_cpu(evtchn, 0); 475 476 evtchn_to_irq[evtchn] = -1; 477 + } 478 + 479 + if (irq_info[irq].type != IRQT_UNBOUND) { 480 irq_info[irq] = mk_unbound_info(); 481 482 dynamic_irq_cleanup(irq);
+26 -15
drivers/xen/manage.c
··· 43 if (err) { 44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", 45 err); 46 - dpm_resume_noirq(PMSG_RESUME); 47 return err; 48 } 49 ··· 68 } 69 70 sysdev_resume(); 71 - dpm_resume_noirq(PMSG_RESUME); 72 73 return 0; 74 } ··· 79 80 shutting_down = SHUTDOWN_SUSPEND; 81 82 #ifdef CONFIG_PREEMPT 83 /* If the kernel is preemptible, we need to freeze all the processes 84 to prevent them from being in the middle of a pagetable update ··· 92 err = freeze_processes(); 93 if (err) { 94 printk(KERN_ERR "xen suspend: freeze failed %d\n", err); 95 - return; 96 } 97 #endif 98 99 err = dpm_suspend_start(PMSG_SUSPEND); 100 if (err) { 101 printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err); 102 - goto out; 103 } 104 105 printk(KERN_DEBUG "suspending xenstore...\n"); 106 xs_suspend(); 107 108 - err = dpm_suspend_noirq(PMSG_SUSPEND); 109 - if (err) { 110 - printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); 111 - goto resume_devices; 112 - } 113 - 114 err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); 115 if (err) { 116 printk(KERN_ERR "failed to start xen_suspend: %d\n", err); 117 - goto out; 118 } 119 120 if (!cancelled) { ··· 126 } else 127 xs_suspend_cancel(); 128 129 - dpm_resume_noirq(PMSG_RESUME); 130 - 131 - resume_devices: 132 dpm_resume_end(PMSG_RESUME); 133 134 /* Make sure timer events get retriggered on all CPUs */ 135 clock_was_set(); 136 - out: 137 #ifdef CONFIG_PREEMPT 138 thaw_processes(); 139 #endif 140 shutting_down = SHUTDOWN_INVALID; 141 } 142 #endif /* CONFIG_PM_SLEEP */
··· 43 if (err) { 44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", 45 err); 46 return err; 47 } 48 ··· 69 } 70 71 sysdev_resume(); 72 73 return 0; 74 } ··· 81 82 shutting_down = SHUTDOWN_SUSPEND; 83 84 + err = stop_machine_create(); 85 + if (err) { 86 + printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err); 87 + goto out; 88 + } 89 + 90 #ifdef CONFIG_PREEMPT 91 /* If the kernel is preemptible, we need to freeze all the processes 92 to prevent them from being in the middle of a pagetable update ··· 88 err = freeze_processes(); 89 if (err) { 90 printk(KERN_ERR "xen suspend: freeze failed %d\n", err); 91 + goto out_destroy_sm; 92 } 93 #endif 94 95 err = dpm_suspend_start(PMSG_SUSPEND); 96 if (err) { 97 printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err); 98 + goto out_thaw; 99 + } 100 + 101 + err = dpm_suspend_noirq(PMSG_SUSPEND); 102 + if (err) { 103 + printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); 104 + goto out_resume; 105 } 106 107 printk(KERN_DEBUG "suspending xenstore...\n"); 108 xs_suspend(); 109 110 err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); 111 + 112 + dpm_resume_noirq(PMSG_RESUME); 113 + 114 if (err) { 115 printk(KERN_ERR "failed to start xen_suspend: %d\n", err); 116 + cancelled = 1; 117 } 118 119 if (!cancelled) { ··· 119 } else 120 xs_suspend_cancel(); 121 122 + out_resume: 123 dpm_resume_end(PMSG_RESUME); 124 125 /* Make sure timer events get retriggered on all CPUs */ 126 clock_was_set(); 127 + 128 + out_thaw: 129 #ifdef CONFIG_PREEMPT 130 thaw_processes(); 131 + 132 + out_destroy_sm: 133 #endif 134 + stop_machine_destroy(); 135 + 136 + out: 137 shutting_down = SHUTDOWN_INVALID; 138 } 139 #endif /* CONFIG_PM_SLEEP */
+32 -16
drivers/xen/xenbus/xenbus_probe.c
··· 454 { 455 return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); 456 } 457 - DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); 458 459 static ssize_t xendev_show_devtype(struct device *dev, 460 struct device_attribute *attr, char *buf) 461 { 462 return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); 463 } 464 - DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); 465 466 static ssize_t xendev_show_modalias(struct device *dev, 467 struct device_attribute *attr, char *buf) 468 { 469 return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); 470 } 471 - DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); 472 473 int xenbus_probe_node(struct xen_bus_type *bus, 474 const char *type, ··· 843 844 MODULE_LICENSE("GPL"); 845 846 - static int is_disconnected_device(struct device *dev, void *data) 847 { 848 struct xenbus_device *xendev = to_xenbus_device(dev); 849 struct device_driver *drv = data; ··· 861 return 0; 862 863 xendrv = to_xenbus_driver(dev->driver); 864 - return (xendev->state != XenbusStateConnected || 865 - (xendrv->is_ready && !xendrv->is_ready(xendev))); 866 } 867 868 - static int exists_disconnected_device(struct device_driver *drv) 869 { 870 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 871 - is_disconnected_device); 872 } 873 874 static int print_device_status(struct device *dev, void *data) ··· 885 /* Information only: is this too noisy? */ 886 printk(KERN_INFO "XENBUS: Device with no driver: %s\n", 887 xendev->nodename); 888 - } else if (xendev->state != XenbusStateConnected) { 889 printk(KERN_WARNING "XENBUS: Timeout connecting " 890 - "to device: %s (state %d)\n", 891 - xendev->nodename, xendev->state); 892 } 893 894 return 0; ··· 901 static int ready_to_wait_for_devices; 902 903 /* 904 - * On a 10 second timeout, wait for all devices currently configured. 
We need 905 * to do this to guarantee that the filesystems and / or network devices 906 * needed for boot are available, before we can allow the boot to proceed. 907 * ··· 916 */ 917 static void wait_for_devices(struct xenbus_driver *xendrv) 918 { 919 - unsigned long timeout = jiffies + 10*HZ; 920 struct device_driver *drv = xendrv ? &xendrv->driver : NULL; 921 922 if (!ready_to_wait_for_devices || !xen_domain()) 923 return; 924 925 - while (exists_disconnected_device(drv)) { 926 - if (time_after(jiffies, timeout)) 927 - break; 928 schedule_timeout_interruptible(HZ/10); 929 } 930 931 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 932 print_device_status);
··· 454 { 455 return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); 456 } 457 + static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); 458 459 static ssize_t xendev_show_devtype(struct device *dev, 460 struct device_attribute *attr, char *buf) 461 { 462 return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); 463 } 464 + static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); 465 466 static ssize_t xendev_show_modalias(struct device *dev, 467 struct device_attribute *attr, char *buf) 468 { 469 return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); 470 } 471 + static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); 472 473 int xenbus_probe_node(struct xen_bus_type *bus, 474 const char *type, ··· 843 844 MODULE_LICENSE("GPL"); 845 846 + static int is_device_connecting(struct device *dev, void *data) 847 { 848 struct xenbus_device *xendev = to_xenbus_device(dev); 849 struct device_driver *drv = data; ··· 861 return 0; 862 863 xendrv = to_xenbus_driver(dev->driver); 864 + return (xendev->state < XenbusStateConnected || 865 + (xendev->state == XenbusStateConnected && 866 + xendrv->is_ready && !xendrv->is_ready(xendev))); 867 } 868 869 + static int exists_connecting_device(struct device_driver *drv) 870 { 871 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 872 + is_device_connecting); 873 } 874 875 static int print_device_status(struct device *dev, void *data) ··· 884 /* Information only: is this too noisy? 
*/ 885 printk(KERN_INFO "XENBUS: Device with no driver: %s\n", 886 xendev->nodename); 887 + } else if (xendev->state < XenbusStateConnected) { 888 + enum xenbus_state rstate = XenbusStateUnknown; 889 + if (xendev->otherend) 890 + rstate = xenbus_read_driver_state(xendev->otherend); 891 printk(KERN_WARNING "XENBUS: Timeout connecting " 892 + "to device: %s (local state %d, remote state %d)\n", 893 + xendev->nodename, xendev->state, rstate); 894 } 895 896 return 0; ··· 897 static int ready_to_wait_for_devices; 898 899 /* 900 + * On a 5-minute timeout, wait for all devices currently configured. We need 901 * to do this to guarantee that the filesystems and / or network devices 902 * needed for boot are available, before we can allow the boot to proceed. 903 * ··· 912 */ 913 static void wait_for_devices(struct xenbus_driver *xendrv) 914 { 915 + unsigned long start = jiffies; 916 struct device_driver *drv = xendrv ? &xendrv->driver : NULL; 917 + unsigned int seconds_waited = 0; 918 919 if (!ready_to_wait_for_devices || !xen_domain()) 920 return; 921 922 + while (exists_connecting_device(drv)) { 923 + if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { 924 + if (!seconds_waited) 925 + printk(KERN_WARNING "XENBUS: Waiting for " 926 + "devices to initialise: "); 927 + seconds_waited += 5; 928 + printk("%us...", 300 - seconds_waited); 929 + if (seconds_waited == 300) 930 + break; 931 + } 932 + 933 schedule_timeout_interruptible(HZ/10); 934 } 935 + 936 + if (seconds_waited) 937 + printk("\n"); 938 939 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 940 print_device_status);