Merge branch 'syscore' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6

* 'syscore' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
Introduce ARCH_NO_SYSDEV_OPS config option (v2)
cpufreq: Use syscore_ops for boot CPU suspend/resume (v2)
KVM: Use syscore_ops instead of sysdev class and sysdev
PCI / Intel IOMMU: Use syscore_ops instead of sysdev class and sysdev
timekeeping: Use syscore_ops instead of sysdev class and sysdev
x86: Use syscore_ops instead of sysdev classes and sysdevs

+211 -356
+1
arch/x86/Kconfig
··· 71 71 select GENERIC_IRQ_SHOW 72 72 select IRQ_FORCED_THREADING 73 73 select USE_GENERIC_SMP_HELPERS if SMP 74 + select ARCH_NO_SYSDEV_OPS 74 75 75 76 config INSTRUCTION_DECODER 76 77 def_bool (KPROBES || PERF_EVENTS)
+6 -20
arch/x86/kernel/amd_iommu_init.c
··· 21 21 #include <linux/acpi.h> 22 22 #include <linux/list.h> 23 23 #include <linux/slab.h> 24 - #include <linux/sysdev.h> 24 + #include <linux/syscore_ops.h> 25 25 #include <linux/interrupt.h> 26 26 #include <linux/msi.h> 27 27 #include <asm/pci-direct.h> ··· 1260 1260 * disable suspend until real resume implemented 1261 1261 */ 1262 1262 1263 - static int amd_iommu_resume(struct sys_device *dev) 1263 + static void amd_iommu_resume(void) 1264 1264 { 1265 1265 struct amd_iommu *iommu; 1266 1266 ··· 1276 1276 */ 1277 1277 amd_iommu_flush_all_devices(); 1278 1278 amd_iommu_flush_all_domains(); 1279 - 1280 - return 0; 1281 1279 } 1282 1280 1283 - static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state) 1281 + static int amd_iommu_suspend(void) 1284 1282 { 1285 1283 /* disable IOMMUs to go out of the way for BIOS */ 1286 1284 disable_iommus(); ··· 1286 1288 return 0; 1287 1289 } 1288 1290 1289 - static struct sysdev_class amd_iommu_sysdev_class = { 1290 - .name = "amd_iommu", 1291 + static struct syscore_ops amd_iommu_syscore_ops = { 1291 1292 .suspend = amd_iommu_suspend, 1292 1293 .resume = amd_iommu_resume, 1293 - }; 1294 - 1295 - static struct sys_device device_amd_iommu = { 1296 - .id = 0, 1297 - .cls = &amd_iommu_sysdev_class, 1298 1294 }; 1299 1295 1300 1296 /* ··· 1407 1415 goto free; 1408 1416 } 1409 1417 1410 - ret = sysdev_class_register(&amd_iommu_sysdev_class); 1411 - if (ret) 1412 - goto free; 1413 - 1414 - ret = sysdev_register(&device_amd_iommu); 1415 - if (ret) 1416 - goto free; 1417 - 1418 1418 ret = amd_iommu_init_devices(); 1419 1419 if (ret) 1420 1420 goto free; ··· 1424 1440 amd_iommu_init_api(); 1425 1441 1426 1442 amd_iommu_init_notifier(); 1443 + 1444 + register_syscore_ops(&amd_iommu_syscore_ops); 1427 1445 1428 1446 if (iommu_pass_through) 1429 1447 goto out;
+9 -24
arch/x86/kernel/apic/apic.c
··· 24 24 #include <linux/ftrace.h> 25 25 #include <linux/ioport.h> 26 26 #include <linux/module.h> 27 - #include <linux/sysdev.h> 27 + #include <linux/syscore_ops.h> 28 28 #include <linux/delay.h> 29 29 #include <linux/timex.h> 30 30 #include <linux/dmar.h> ··· 2046 2046 unsigned int apic_thmr; 2047 2047 } apic_pm_state; 2048 2048 2049 - static int lapic_suspend(struct sys_device *dev, pm_message_t state) 2049 + static int lapic_suspend(void) 2050 2050 { 2051 2051 unsigned long flags; 2052 2052 int maxlvt; ··· 2084 2084 return 0; 2085 2085 } 2086 2086 2087 - static int lapic_resume(struct sys_device *dev) 2087 + static void lapic_resume(void) 2088 2088 { 2089 2089 unsigned int l, h; 2090 2090 unsigned long flags; 2091 - int maxlvt; 2092 - int ret = 0; 2091 + int maxlvt, ret; 2093 2092 struct IO_APIC_route_entry **ioapic_entries = NULL; 2094 2093 2095 2094 if (!apic_pm_state.active) 2096 - return 0; 2095 + return; 2097 2096 2098 2097 local_irq_save(flags); 2099 2098 if (intr_remapping_enabled) { 2100 2099 ioapic_entries = alloc_ioapic_entries(); 2101 2100 if (!ioapic_entries) { 2102 2101 WARN(1, "Alloc ioapic_entries in lapic resume failed."); 2103 - ret = -ENOMEM; 2104 2102 goto restore; 2105 2103 } 2106 2104 ··· 2160 2162 } 2161 2163 restore: 2162 2164 local_irq_restore(flags); 2163 - 2164 - return ret; 2165 2165 } 2166 2166 2167 2167 /* ··· 2167 2171 * are needed on every CPU up until machine_halt/restart/poweroff. 
2168 2172 */ 2169 2173 2170 - static struct sysdev_class lapic_sysclass = { 2171 - .name = "lapic", 2174 + static struct syscore_ops lapic_syscore_ops = { 2172 2175 .resume = lapic_resume, 2173 2176 .suspend = lapic_suspend, 2174 - }; 2175 - 2176 - static struct sys_device device_lapic = { 2177 - .id = 0, 2178 - .cls = &lapic_sysclass, 2179 2177 }; 2180 2178 2181 2179 static void __cpuinit apic_pm_activate(void) ··· 2179 2189 2180 2190 static int __init init_lapic_sysfs(void) 2181 2191 { 2182 - int error; 2183 - 2184 - if (!cpu_has_apic) 2185 - return 0; 2186 2192 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ 2193 + if (cpu_has_apic) 2194 + register_syscore_ops(&lapic_syscore_ops); 2187 2195 2188 - error = sysdev_class_register(&lapic_sysclass); 2189 - if (!error) 2190 - error = sysdev_register(&device_lapic); 2191 - return error; 2196 + return 0; 2192 2197 } 2193 2198 2194 2199 /* local apic needs to resume before other devices access its registers. */
+47 -52
arch/x86/kernel/apic/io_apic.c
··· 30 30 #include <linux/compiler.h> 31 31 #include <linux/acpi.h> 32 32 #include <linux/module.h> 33 - #include <linux/sysdev.h> 33 + #include <linux/syscore_ops.h> 34 34 #include <linux/msi.h> 35 35 #include <linux/htirq.h> 36 36 #include <linux/freezer.h> ··· 2918 2918 2919 2919 late_initcall(io_apic_bug_finalize); 2920 2920 2921 - struct sysfs_ioapic_data { 2922 - struct sys_device dev; 2923 - struct IO_APIC_route_entry entry[0]; 2924 - }; 2925 - static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; 2921 + static struct IO_APIC_route_entry *ioapic_saved_data[MAX_IO_APICS]; 2926 2922 2927 - static int ioapic_suspend(struct sys_device *dev, pm_message_t state) 2923 + static void suspend_ioapic(int ioapic_id) 2928 2924 { 2929 - struct IO_APIC_route_entry *entry; 2930 - struct sysfs_ioapic_data *data; 2925 + struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id]; 2931 2926 int i; 2932 2927 2933 - data = container_of(dev, struct sysfs_ioapic_data, dev); 2934 - entry = data->entry; 2935 - for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) 2936 - *entry = ioapic_read_entry(dev->id, i); 2928 + if (!saved_data) 2929 + return; 2930 + 2931 + for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++) 2932 + saved_data[i] = ioapic_read_entry(ioapic_id, i); 2933 + } 2934 + 2935 + static int ioapic_suspend(void) 2936 + { 2937 + int ioapic_id; 2938 + 2939 + for (ioapic_id = 0; ioapic_id < nr_ioapics; ioapic_id++) 2940 + suspend_ioapic(ioapic_id); 2937 2941 2938 2942 return 0; 2939 2943 } 2940 2944 2941 - static int ioapic_resume(struct sys_device *dev) 2945 + static void resume_ioapic(int ioapic_id) 2942 2946 { 2943 - struct IO_APIC_route_entry *entry; 2944 - struct sysfs_ioapic_data *data; 2947 + struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id]; 2945 2948 unsigned long flags; 2946 2949 union IO_APIC_reg_00 reg_00; 2947 2950 int i; 2948 2951 2949 - data = container_of(dev, struct sysfs_ioapic_data, dev); 2950 - entry = 
data->entry; 2952 + if (!saved_data) 2953 + return; 2951 2954 2952 2955 raw_spin_lock_irqsave(&ioapic_lock, flags); 2953 - reg_00.raw = io_apic_read(dev->id, 0); 2954 - if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { 2955 - reg_00.bits.ID = mp_ioapics[dev->id].apicid; 2956 - io_apic_write(dev->id, 0, reg_00.raw); 2956 + reg_00.raw = io_apic_read(ioapic_id, 0); 2957 + if (reg_00.bits.ID != mp_ioapics[ioapic_id].apicid) { 2958 + reg_00.bits.ID = mp_ioapics[ioapic_id].apicid; 2959 + io_apic_write(ioapic_id, 0, reg_00.raw); 2957 2960 } 2958 2961 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2959 - for (i = 0; i < nr_ioapic_registers[dev->id]; i++) 2960 - ioapic_write_entry(dev->id, i, entry[i]); 2961 - 2962 - return 0; 2962 + for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++) 2963 + ioapic_write_entry(ioapic_id, i, saved_data[i]); 2963 2964 } 2964 2965 2965 - static struct sysdev_class ioapic_sysdev_class = { 2966 - .name = "ioapic", 2966 + static void ioapic_resume(void) 2967 + { 2968 + int ioapic_id; 2969 + 2970 + for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--) 2971 + resume_ioapic(ioapic_id); 2972 + } 2973 + 2974 + static struct syscore_ops ioapic_syscore_ops = { 2967 2975 .suspend = ioapic_suspend, 2968 2976 .resume = ioapic_resume, 2969 2977 }; 2970 2978 2971 - static int __init ioapic_init_sysfs(void) 2979 + static int __init ioapic_init_ops(void) 2972 2980 { 2973 - struct sys_device * dev; 2974 - int i, size, error; 2981 + int i; 2975 2982 2976 - error = sysdev_class_register(&ioapic_sysdev_class); 2977 - if (error) 2978 - return error; 2983 + for (i = 0; i < nr_ioapics; i++) { 2984 + unsigned int size; 2979 2985 2980 - for (i = 0; i < nr_ioapics; i++ ) { 2981 - size = sizeof(struct sys_device) + nr_ioapic_registers[i] 2986 + size = nr_ioapic_registers[i] 2982 2987 * sizeof(struct IO_APIC_route_entry); 2983 - mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); 2984 - if (!mp_ioapic_data[i]) { 2985 - printk(KERN_ERR "Can't suspend/resume IOAPIC 
%d\n", i); 2986 - continue; 2987 - } 2988 - dev = &mp_ioapic_data[i]->dev; 2989 - dev->id = i; 2990 - dev->cls = &ioapic_sysdev_class; 2991 - error = sysdev_register(dev); 2992 - if (error) { 2993 - kfree(mp_ioapic_data[i]); 2994 - mp_ioapic_data[i] = NULL; 2995 - printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); 2996 - continue; 2997 - } 2988 + ioapic_saved_data[i] = kzalloc(size, GFP_KERNEL); 2989 + if (!ioapic_saved_data[i]) 2990 + pr_err("IOAPIC %d: suspend/resume impossible!\n", i); 2998 2991 } 2992 + 2993 + register_syscore_ops(&ioapic_syscore_ops); 2999 2994 3000 2995 return 0; 3001 2996 } 3002 2997 3003 - device_initcall(ioapic_init_sysfs); 2998 + device_initcall(ioapic_init_ops); 3004 2999 3005 3000 /* 3006 3001 * Dynamic irq allocate and deallocation
+12 -9
arch/x86/kernel/cpu/mcheck/mce.c
··· 21 21 #include <linux/percpu.h> 22 22 #include <linux/string.h> 23 23 #include <linux/sysdev.h> 24 + #include <linux/syscore_ops.h> 24 25 #include <linux/delay.h> 25 26 #include <linux/ctype.h> 26 27 #include <linux/sched.h> ··· 1750 1749 return 0; 1751 1750 } 1752 1751 1753 - static int mce_suspend(struct sys_device *dev, pm_message_t state) 1752 + static int mce_suspend(void) 1754 1753 { 1755 1754 return mce_disable_error_reporting(); 1756 1755 } 1757 1756 1758 - static int mce_shutdown(struct sys_device *dev) 1757 + static void mce_shutdown(void) 1759 1758 { 1760 - return mce_disable_error_reporting(); 1759 + mce_disable_error_reporting(); 1761 1760 } 1762 1761 1763 1762 /* ··· 1765 1764 * Only one CPU is active at this time, the others get re-added later using 1766 1765 * CPU hotplug: 1767 1766 */ 1768 - static int mce_resume(struct sys_device *dev) 1767 + static void mce_resume(void) 1769 1768 { 1770 1769 __mcheck_cpu_init_generic(); 1771 1770 __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); 1772 - 1773 - return 0; 1774 1771 } 1772 + 1773 + static struct syscore_ops mce_syscore_ops = { 1774 + .suspend = mce_suspend, 1775 + .shutdown = mce_shutdown, 1776 + .resume = mce_resume, 1777 + }; 1775 1778 1776 1779 static void mce_cpu_restart(void *data) 1777 1780 { ··· 1813 1808 } 1814 1809 1815 1810 static struct sysdev_class mce_sysclass = { 1816 - .suspend = mce_suspend, 1817 - .shutdown = mce_shutdown, 1818 - .resume = mce_resume, 1819 1811 .name = "machinecheck", 1820 1812 }; 1821 1813 ··· 2141 2139 return err; 2142 2140 } 2143 2141 2142 + register_syscore_ops(&mce_syscore_ops); 2144 2143 register_hotcpu_notifier(&mce_cpu_notifier); 2145 2144 misc_register(&mce_log_device); 2146 2145
+5 -5
arch/x86/kernel/cpu/mtrr/main.c
··· 45 45 #include <linux/cpu.h> 46 46 #include <linux/pci.h> 47 47 #include <linux/smp.h> 48 + #include <linux/syscore_ops.h> 48 49 49 50 #include <asm/processor.h> 50 51 #include <asm/e820.h> ··· 631 630 632 631 static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; 633 632 634 - static int mtrr_save(struct sys_device *sysdev, pm_message_t state) 633 + static int mtrr_save(void) 635 634 { 636 635 int i; 637 636 ··· 643 642 return 0; 644 643 } 645 644 646 - static int mtrr_restore(struct sys_device *sysdev) 645 + static void mtrr_restore(void) 647 646 { 648 647 int i; 649 648 ··· 654 653 mtrr_value[i].ltype); 655 654 } 656 655 } 657 - return 0; 658 656 } 659 657 660 658 661 659 662 - static struct sysdev_driver mtrr_sysdev_driver = { 660 + static struct syscore_ops mtrr_syscore_ops = { 663 661 .suspend = mtrr_save, 664 662 .resume = mtrr_restore, 665 663 }; ··· 839 839 * TBD: is there any system with such CPU which supports 840 840 * suspend/resume? If no, we should remove the code. 841 841 */ 842 - sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver); 842 + register_syscore_ops(&mtrr_syscore_ops); 843 843 844 844 return 0; 845 845 }
+7 -23
arch/x86/kernel/i8237.c
··· 10 10 */ 11 11 12 12 #include <linux/init.h> 13 - #include <linux/sysdev.h> 13 + #include <linux/syscore_ops.h> 14 14 15 15 #include <asm/dma.h> 16 16 ··· 21 21 * in asm/dma.h. 22 22 */ 23 23 24 - static int i8237A_resume(struct sys_device *dev) 24 + static void i8237A_resume(void) 25 25 { 26 26 unsigned long flags; 27 27 int i; ··· 41 41 enable_dma(4); 42 42 43 43 release_dma_lock(flags); 44 - 45 - return 0; 46 44 } 47 45 48 - static int i8237A_suspend(struct sys_device *dev, pm_message_t state) 49 - { 50 - return 0; 51 - } 52 - 53 - static struct sysdev_class i8237_sysdev_class = { 54 - .name = "i8237", 55 - .suspend = i8237A_suspend, 46 + static struct syscore_ops i8237_syscore_ops = { 56 47 .resume = i8237A_resume, 57 48 }; 58 49 59 - static struct sys_device device_i8237A = { 60 - .id = 0, 61 - .cls = &i8237_sysdev_class, 62 - }; 63 - 64 - static int __init i8237A_init_sysfs(void) 50 + static int __init i8237A_init_ops(void) 65 51 { 66 - int error = sysdev_class_register(&i8237_sysdev_class); 67 - if (!error) 68 - error = sysdev_register(&device_i8237A); 69 - return error; 52 + register_syscore_ops(&i8237_syscore_ops); 53 + return 0; 70 54 } 71 - device_initcall(i8237A_init_sysfs); 55 + device_initcall(i8237A_init_ops);
+10 -23
arch/x86/kernel/i8259.c
··· 8 8 #include <linux/random.h> 9 9 #include <linux/init.h> 10 10 #include <linux/kernel_stat.h> 11 - #include <linux/sysdev.h> 11 + #include <linux/syscore_ops.h> 12 12 #include <linux/bitops.h> 13 13 #include <linux/acpi.h> 14 14 #include <linux/io.h> ··· 245 245 trigger[1] = inb(0x4d1) & 0xDE; 246 246 } 247 247 248 - static int i8259A_resume(struct sys_device *dev) 248 + static void i8259A_resume(void) 249 249 { 250 250 init_8259A(i8259A_auto_eoi); 251 251 restore_ELCR(irq_trigger); 252 - return 0; 253 252 } 254 253 255 - static int i8259A_suspend(struct sys_device *dev, pm_message_t state) 254 + static int i8259A_suspend(void) 256 255 { 257 256 save_ELCR(irq_trigger); 258 257 return 0; 259 258 } 260 259 261 - static int i8259A_shutdown(struct sys_device *dev) 260 + static void i8259A_shutdown(void) 262 261 { 263 262 /* Put the i8259A into a quiescent state that 264 263 * the kernel initialization code can get it ··· 265 266 */ 266 267 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 267 268 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ 268 - return 0; 269 269 } 270 270 271 - static struct sysdev_class i8259_sysdev_class = { 272 - .name = "i8259", 271 + static struct syscore_ops i8259_syscore_ops = { 273 272 .suspend = i8259A_suspend, 274 273 .resume = i8259A_resume, 275 274 .shutdown = i8259A_shutdown, 276 - }; 277 - 278 - static struct sys_device device_i8259A = { 279 - .id = 0, 280 - .cls = &i8259_sysdev_class, 281 275 }; 282 276 283 277 static void mask_8259A(void) ··· 391 399 392 400 struct legacy_pic *legacy_pic = &default_legacy_pic; 393 401 394 - static int __init i8259A_init_sysfs(void) 402 + static int __init i8259A_init_ops(void) 395 403 { 396 - int error; 404 + if (legacy_pic == &default_legacy_pic) 405 + register_syscore_ops(&i8259_syscore_ops); 397 406 398 - if (legacy_pic != &default_legacy_pic) 399 - return 0; 400 - 401 - error = sysdev_class_register(&i8259_sysdev_class); 402 - if (!error) 403 - error = 
sysdev_register(&device_i8259A); 404 - return error; 407 + return 0; 405 408 } 406 409 407 - device_initcall(i8259A_init_sysfs); 410 + device_initcall(i8259A_init_ops);
+18 -24
arch/x86/kernel/microcode_core.c
··· 82 82 #include <linux/cpu.h> 83 83 #include <linux/fs.h> 84 84 #include <linux/mm.h> 85 + #include <linux/syscore_ops.h> 85 86 86 87 #include <asm/microcode.h> 87 88 #include <asm/processor.h> ··· 439 438 return 0; 440 439 } 441 440 442 - static int mc_sysdev_resume(struct sys_device *dev) 443 - { 444 - int cpu = dev->id; 445 - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 446 - 447 - if (!cpu_online(cpu)) 448 - return 0; 449 - 450 - /* 451 - * All non-bootup cpus are still disabled, 452 - * so only CPU 0 will apply ucode here. 453 - * 454 - * Moreover, there can be no concurrent 455 - * updates from any other places at this point. 456 - */ 457 - WARN_ON(cpu != 0); 458 - 459 - if (uci->valid && uci->mc) 460 - microcode_ops->apply_microcode(cpu); 461 - 462 - return 0; 463 - } 464 - 465 441 static struct sysdev_driver mc_sysdev_driver = { 466 442 .add = mc_sysdev_add, 467 443 .remove = mc_sysdev_remove, 468 - .resume = mc_sysdev_resume, 444 + }; 445 + 446 + /** 447 + * mc_bp_resume - Update boot CPU microcode during resume. 448 + */ 449 + static void mc_bp_resume(void) 450 + { 451 + int cpu = smp_processor_id(); 452 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 453 + 454 + if (uci->valid && uci->mc) 455 + microcode_ops->apply_microcode(cpu); 456 + } 457 + 458 + static struct syscore_ops mc_syscore_ops = { 459 + .resume = mc_bp_resume, 469 460 }; 470 461 471 462 static __cpuinit int ··· 535 542 if (error) 536 543 return error; 537 544 545 + register_syscore_ops(&mc_syscore_ops); 538 546 register_hotcpu_notifier(&mc_cpu_notifier); 539 547 540 548 pr_info("Microcode Update Driver: v" MICROCODE_VERSION
+7 -25
arch/x86/kernel/pci-gart_64.c
··· 27 27 #include <linux/kdebug.h> 28 28 #include <linux/scatterlist.h> 29 29 #include <linux/iommu-helper.h> 30 - #include <linux/sysdev.h> 30 + #include <linux/syscore_ops.h> 31 31 #include <linux/io.h> 32 32 #include <linux/gfp.h> 33 33 #include <asm/atomic.h> ··· 589 589 aperture_alloc = aper_alloc; 590 590 } 591 591 592 - static void gart_fixup_northbridges(struct sys_device *dev) 592 + static void gart_fixup_northbridges(void) 593 593 { 594 594 int i; 595 595 ··· 613 613 } 614 614 } 615 615 616 - static int gart_resume(struct sys_device *dev) 616 + static void gart_resume(void) 617 617 { 618 618 pr_info("PCI-DMA: Resuming GART IOMMU\n"); 619 619 620 - gart_fixup_northbridges(dev); 620 + gart_fixup_northbridges(); 621 621 622 622 enable_gart_translations(); 623 - 624 - return 0; 625 623 } 626 624 627 - static int gart_suspend(struct sys_device *dev, pm_message_t state) 628 - { 629 - return 0; 630 - } 631 - 632 - static struct sysdev_class gart_sysdev_class = { 633 - .name = "gart", 634 - .suspend = gart_suspend, 625 + static struct syscore_ops gart_syscore_ops = { 635 626 .resume = gart_resume, 636 627 637 - }; 638 - 639 - static struct sys_device device_gart = { 640 - .cls = &gart_sysdev_class, 641 628 }; 642 629 643 630 /* ··· 637 650 unsigned aper_base, new_aper_base; 638 651 struct pci_dev *dev; 639 652 void *gatt; 640 - int i, error; 653 + int i; 641 654 642 655 pr_info("PCI-DMA: Disabling AGP.\n"); 643 656 ··· 672 685 673 686 agp_gatt_table = gatt; 674 687 675 - error = sysdev_class_register(&gart_sysdev_class); 676 - if (!error) 677 - error = sysdev_register(&device_gart); 678 - if (error) 679 - panic("Could not register gart_sysdev -- " 680 - "would corrupt data on next suspend"); 688 + register_syscore_ops(&gart_syscore_ops); 681 689 682 690 flush_gart(); 683 691
+12 -32
arch/x86/oprofile/nmi_int.c
··· 15 15 #include <linux/notifier.h> 16 16 #include <linux/smp.h> 17 17 #include <linux/oprofile.h> 18 - #include <linux/sysdev.h> 18 + #include <linux/syscore_ops.h> 19 19 #include <linux/slab.h> 20 20 #include <linux/moduleparam.h> 21 21 #include <linux/kdebug.h> ··· 536 536 537 537 #ifdef CONFIG_PM 538 538 539 - static int nmi_suspend(struct sys_device *dev, pm_message_t state) 539 + static int nmi_suspend(void) 540 540 { 541 541 /* Only one CPU left, just stop that one */ 542 542 if (nmi_enabled == 1) ··· 544 544 return 0; 545 545 } 546 546 547 - static int nmi_resume(struct sys_device *dev) 547 + static void nmi_resume(void) 548 548 { 549 549 if (nmi_enabled == 1) 550 550 nmi_cpu_start(NULL); 551 - return 0; 552 551 } 553 552 554 - static struct sysdev_class oprofile_sysclass = { 555 - .name = "oprofile", 553 + static struct syscore_ops oprofile_syscore_ops = { 556 554 .resume = nmi_resume, 557 555 .suspend = nmi_suspend, 558 556 }; 559 557 560 - static struct sys_device device_oprofile = { 561 - .id = 0, 562 - .cls = &oprofile_sysclass, 563 - }; 564 - 565 - static int __init init_sysfs(void) 558 + static void __init init_suspend_resume(void) 566 559 { 567 - int error; 568 - 569 - error = sysdev_class_register(&oprofile_sysclass); 570 - if (error) 571 - return error; 572 - 573 - error = sysdev_register(&device_oprofile); 574 - if (error) 575 - sysdev_class_unregister(&oprofile_sysclass); 576 - 577 - return error; 560 + register_syscore_ops(&oprofile_syscore_ops); 578 561 } 579 562 580 - static void exit_sysfs(void) 563 + static void exit_suspend_resume(void) 581 564 { 582 - sysdev_unregister(&device_oprofile); 583 - sysdev_class_unregister(&oprofile_sysclass); 565 + unregister_syscore_ops(&oprofile_syscore_ops); 584 566 } 585 567 586 568 #else 587 569 588 - static inline int init_sysfs(void) { return 0; } 589 - static inline void exit_sysfs(void) { } 570 + static inline void init_suspend_resume(void) { } 571 + static inline void exit_suspend_resume(void) { } 
590 572 591 573 #endif /* CONFIG_PM */ 592 574 ··· 771 789 772 790 mux_init(ops); 773 791 774 - ret = init_sysfs(); 775 - if (ret) 776 - return ret; 792 + init_suspend_resume(); 777 793 778 794 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 779 795 return 0; ··· 779 799 780 800 void op_nmi_exit(void) 781 801 { 782 - exit_sysfs(); 802 + exit_suspend_resume(); 783 803 }
+7
drivers/base/Kconfig
··· 168 168 bool 169 169 default n 170 170 171 + config ARCH_NO_SYSDEV_OPS 172 + bool 173 + ---help--- 174 + To be selected by architectures that don't use sysdev class or 175 + sysdev driver power management (suspend/resume) and shutdown 176 + operations. 177 + 171 178 endmenu
+2 -1
drivers/base/sys.c
··· 329 329 } 330 330 331 331 332 - 332 + #ifndef CONFIG_ARCH_NO_SYSDEV_OPS 333 333 /** 334 334 * sysdev_shutdown - Shut down all system devices. 335 335 * ··· 524 524 return 0; 525 525 } 526 526 EXPORT_SYMBOL_GPL(sysdev_resume); 527 + #endif /* CONFIG_ARCH_NO_SYSDEV_OPS */ 527 528 528 529 int __init system_bus_init(void) 529 530 {
+26 -40
drivers/cpufreq/cpufreq.c
··· 28 28 #include <linux/cpu.h> 29 29 #include <linux/completion.h> 30 30 #include <linux/mutex.h> 31 + #include <linux/syscore_ops.h> 31 32 32 33 #include <trace/events/power.h> 33 34 ··· 1341 1340 } 1342 1341 EXPORT_SYMBOL(cpufreq_get); 1343 1342 1343 + static struct sysdev_driver cpufreq_sysdev_driver = { 1344 + .add = cpufreq_add_dev, 1345 + .remove = cpufreq_remove_dev, 1346 + }; 1347 + 1344 1348 1345 1349 /** 1346 - * cpufreq_suspend - let the low level driver prepare for suspend 1350 + * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. 1351 + * 1352 + * This function is only executed for the boot processor. The other CPUs 1353 + * have been put offline by means of CPU hotplug. 1347 1354 */ 1348 - 1349 - static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) 1355 + static int cpufreq_bp_suspend(void) 1350 1356 { 1351 1357 int ret = 0; 1352 1358 1353 - int cpu = sysdev->id; 1359 + int cpu = smp_processor_id(); 1354 1360 struct cpufreq_policy *cpu_policy; 1355 1361 1356 1362 dprintk("suspending cpu %u\n", cpu); 1357 1363 1358 - if (!cpu_online(cpu)) 1359 - return 0; 1360 - 1361 - /* we may be lax here as interrupts are off. Nonetheless 1362 - * we need to grab the correct cpu policy, as to check 1363 - * whether we really run on this CPU. 1364 - */ 1365 - 1364 + /* If there's no policy for the boot CPU, we have nothing to do. 
*/ 1366 1365 cpu_policy = cpufreq_cpu_get(cpu); 1367 1366 if (!cpu_policy) 1368 - return -EINVAL; 1369 - 1370 - /* only handle each CPU group once */ 1371 - if (unlikely(cpu_policy->cpu != cpu)) 1372 - goto out; 1367 + return 0; 1373 1368 1374 1369 if (cpufreq_driver->suspend) { 1375 1370 ret = cpufreq_driver->suspend(cpu_policy); ··· 1374 1377 "step on CPU %u\n", cpu_policy->cpu); 1375 1378 } 1376 1379 1377 - out: 1378 1380 cpufreq_cpu_put(cpu_policy); 1379 1381 return ret; 1380 1382 } 1381 1383 1382 1384 /** 1383 - * cpufreq_resume - restore proper CPU frequency handling after resume 1385 + * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU. 1384 1386 * 1385 1387 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) 1386 1388 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are ··· 1387 1391 * what we believe it to be. This is a bit later than when it 1388 1392 * should be, but nonethteless it's better than calling 1389 1393 * cpufreq_driver->get() here which might re-enable interrupts... 1394 + * 1395 + * This function is only executed for the boot CPU. The other CPUs have not 1396 + * been turned on yet. 1390 1397 */ 1391 - static int cpufreq_resume(struct sys_device *sysdev) 1398 + static void cpufreq_bp_resume(void) 1392 1399 { 1393 1400 int ret = 0; 1394 1401 1395 - int cpu = sysdev->id; 1402 + int cpu = smp_processor_id(); 1396 1403 struct cpufreq_policy *cpu_policy; 1397 1404 1398 1405 dprintk("resuming cpu %u\n", cpu); 1399 1406 1400 - if (!cpu_online(cpu)) 1401 - return 0; 1402 - 1403 - /* we may be lax here as interrupts are off. Nonetheless 1404 - * we need to grab the correct cpu policy, as to check 1405 - * whether we really run on this CPU. 1406 - */ 1407 - 1407 + /* If there's no policy for the boot CPU, we have nothing to do. 
*/ 1408 1408 cpu_policy = cpufreq_cpu_get(cpu); 1409 1409 if (!cpu_policy) 1410 - return -EINVAL; 1411 - 1412 - /* only handle each CPU group once */ 1413 - if (unlikely(cpu_policy->cpu != cpu)) 1414 - goto fail; 1410 + return; 1415 1411 1416 1412 if (cpufreq_driver->resume) { 1417 1413 ret = cpufreq_driver->resume(cpu_policy); ··· 1418 1430 1419 1431 fail: 1420 1432 cpufreq_cpu_put(cpu_policy); 1421 - return ret; 1422 1433 } 1423 1434 1424 - static struct sysdev_driver cpufreq_sysdev_driver = { 1425 - .add = cpufreq_add_dev, 1426 - .remove = cpufreq_remove_dev, 1427 - .suspend = cpufreq_suspend, 1428 - .resume = cpufreq_resume, 1435 + static struct syscore_ops cpufreq_syscore_ops = { 1436 + .suspend = cpufreq_bp_suspend, 1437 + .resume = cpufreq_bp_resume, 1429 1438 }; 1430 1439 1431 1440 ··· 1987 2002 cpufreq_global_kobject = kobject_create_and_add("cpufreq", 1988 2003 &cpu_sysdev_class.kset.kobj); 1989 2004 BUG_ON(!cpufreq_global_kobject); 2005 + register_syscore_ops(&cpufreq_syscore_ops); 1990 2006 1991 2007 return 0; 1992 2008 }
+9 -29
drivers/pci/intel-iommu.c
··· 36 36 #include <linux/iova.h> 37 37 #include <linux/iommu.h> 38 38 #include <linux/intel-iommu.h> 39 - #include <linux/sysdev.h> 39 + #include <linux/syscore_ops.h> 40 40 #include <linux/tboot.h> 41 41 #include <linux/dmi.h> 42 42 #include <asm/cacheflush.h> ··· 3135 3135 } 3136 3136 } 3137 3137 3138 - static int iommu_suspend(struct sys_device *dev, pm_message_t state) 3138 + static int iommu_suspend(void) 3139 3139 { 3140 3140 struct dmar_drhd_unit *drhd; 3141 3141 struct intel_iommu *iommu = NULL; ··· 3175 3175 return -ENOMEM; 3176 3176 } 3177 3177 3178 - static int iommu_resume(struct sys_device *dev) 3178 + static void iommu_resume(void) 3179 3179 { 3180 3180 struct dmar_drhd_unit *drhd; 3181 3181 struct intel_iommu *iommu = NULL; ··· 3183 3183 3184 3184 if (init_iommu_hw()) { 3185 3185 WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); 3186 - return -EIO; 3186 + return; 3187 3187 } 3188 3188 3189 3189 for_each_active_iommu(iommu, drhd) { ··· 3204 3204 3205 3205 for_each_active_iommu(iommu, drhd) 3206 3206 kfree(iommu->iommu_state); 3207 - 3208 - return 0; 3209 3207 } 3210 3208 3211 - static struct sysdev_class iommu_sysclass = { 3212 - .name = "iommu", 3209 + static struct syscore_ops iommu_syscore_ops = { 3213 3210 .resume = iommu_resume, 3214 3211 .suspend = iommu_suspend, 3215 3212 }; 3216 3213 3217 - static struct sys_device device_iommu = { 3218 - .cls = &iommu_sysclass, 3219 - }; 3220 - 3221 - static int __init init_iommu_sysfs(void) 3214 + static void __init init_iommu_pm_ops(void) 3222 3215 { 3223 - int error; 3224 - 3225 - error = sysdev_class_register(&iommu_sysclass); 3226 - if (error) 3227 - return error; 3228 - 3229 - error = sysdev_register(&device_iommu); 3230 - if (error) 3231 - sysdev_class_unregister(&iommu_sysclass); 3232 - 3233 - return error; 3216 + register_syscore_ops(&iommu_syscore_ops); 3234 3217 } 3235 3218 3236 3219 #else 3237 - static int __init init_iommu_sysfs(void) 3238 - { 3239 - return 0; 3240 - } 3220 + static inline 
void init_iommu_pm_ops(void) { } 3241 3221 #endif /* CONFIG_PM */ 3242 3222 3243 3223 /* ··· 3300 3320 #endif 3301 3321 dma_ops = &intel_dma_ops; 3302 3322 3303 - init_iommu_sysfs(); 3323 + init_iommu_pm_ops(); 3304 3324 3305 3325 register_iommu(&intel_iommu_ops); 3306 3326
+4
include/linux/device.h
··· 633 633 /* drivers/base/power/shutdown.c */ 634 634 extern void device_shutdown(void); 635 635 636 + #ifndef CONFIG_ARCH_NO_SYSDEV_OPS 636 637 /* drivers/base/sys.c */ 637 638 extern void sysdev_shutdown(void); 639 + #else 640 + static inline void sysdev_shutdown(void) { } 641 + #endif 638 642 639 643 /* debugging and troubleshooting/diagnostic helpers. */ 640 644 extern const char *dev_driver_string(const struct device *dev);
+8 -2
include/linux/pm.h
··· 529 529 */ 530 530 531 531 #ifdef CONFIG_PM_SLEEP 532 - extern void device_pm_lock(void); 532 + #ifndef CONFIG_ARCH_NO_SYSDEV_OPS 533 + extern int sysdev_suspend(pm_message_t state); 533 534 extern int sysdev_resume(void); 535 + #else 536 + static inline int sysdev_suspend(pm_message_t state) { return 0; } 537 + static inline int sysdev_resume(void) { return 0; } 538 + #endif 539 + 540 + extern void device_pm_lock(void); 534 541 extern void dpm_resume_noirq(pm_message_t state); 535 542 extern void dpm_resume_end(pm_message_t state); 536 543 537 544 extern void device_pm_unlock(void); 538 - extern int sysdev_suspend(pm_message_t state); 539 545 extern int dpm_suspend_noirq(pm_message_t state); 540 546 extern int dpm_suspend_start(pm_message_t state); 541 547
+5 -2
include/linux/sysdev.h
··· 33 33 const char *name; 34 34 struct list_head drivers; 35 35 struct sysdev_class_attribute **attrs; 36 - 36 + struct kset kset; 37 + #ifndef CONFIG_ARCH_NO_SYSDEV_OPS 37 38 /* Default operations for these types of devices */ 38 39 int (*shutdown)(struct sys_device *); 39 40 int (*suspend)(struct sys_device *, pm_message_t state); 40 41 int (*resume)(struct sys_device *); 41 - struct kset kset; 42 + #endif 42 43 }; 43 44 44 45 struct sysdev_class_attribute { ··· 77 76 struct list_head entry; 78 77 int (*add)(struct sys_device *); 79 78 int (*remove)(struct sys_device *); 79 + #ifndef CONFIG_ARCH_NO_SYSDEV_OPS 80 80 int (*shutdown)(struct sys_device *); 81 81 int (*suspend)(struct sys_device *, pm_message_t state); 82 82 int (*resume)(struct sys_device *); 83 + #endif 83 84 }; 84 85 85 86
+8 -19
kernel/time/timekeeping.c
··· 14 14 #include <linux/init.h> 15 15 #include <linux/mm.h> 16 16 #include <linux/sched.h> 17 - #include <linux/sysdev.h> 17 + #include <linux/syscore_ops.h> 18 18 #include <linux/clocksource.h> 19 19 #include <linux/jiffies.h> 20 20 #include <linux/time.h> ··· 597 597 598 598 /** 599 599 * timekeeping_resume - Resumes the generic timekeeping subsystem. 600 - * @dev: unused 601 600 * 602 601 * This is for the generic clocksource timekeeping. 603 602 * xtime/wall_to_monotonic/jiffies/etc are 604 603 * still managed by arch specific suspend/resume code. 605 604 */ 606 - static int timekeeping_resume(struct sys_device *dev) 605 + static void timekeeping_resume(void) 607 606 { 608 607 unsigned long flags; 609 608 struct timespec ts; ··· 631 632 632 633 /* Resume hrtimers */ 633 634 hres_timers_resume(); 634 - 635 - return 0; 636 635 } 637 636 638 - static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) 637 + static int timekeeping_suspend(void) 639 638 { 640 639 unsigned long flags; 641 640 ··· 651 654 } 652 655 653 656 /* sysfs resume/suspend bits for timekeeping */ 654 - static struct sysdev_class timekeeping_sysclass = { 655 - .name = "timekeeping", 657 + static struct syscore_ops timekeeping_syscore_ops = { 656 658 .resume = timekeeping_resume, 657 659 .suspend = timekeeping_suspend, 658 660 }; 659 661 660 - static struct sys_device device_timer = { 661 - .id = 0, 662 - .cls = &timekeeping_sysclass, 663 - }; 664 - 665 - static int __init timekeeping_init_device(void) 662 + static int __init timekeeping_init_ops(void) 666 663 { 667 - int error = sysdev_class_register(&timekeeping_sysclass); 668 - if (!error) 669 - error = sysdev_register(&device_timer); 670 - return error; 664 + register_syscore_ops(&timekeeping_syscore_ops); 665 + return 0; 671 666 } 672 667 673 - device_initcall(timekeeping_init_device); 668 + device_initcall(timekeeping_init_ops); 674 669 675 670 /* 676 671 * If the error is already larger, we look ahead even further
+8 -26
virt/kvm/kvm_main.c
··· 30 30 #include <linux/debugfs.h> 31 31 #include <linux/highmem.h> 32 32 #include <linux/file.h> 33 - #include <linux/sysdev.h> 33 + #include <linux/syscore_ops.h> 34 34 #include <linux/cpu.h> 35 35 #include <linux/sched.h> 36 36 #include <linux/cpumask.h> ··· 2446 2446 debugfs_remove(kvm_debugfs_dir); 2447 2447 } 2448 2448 2449 - static int kvm_suspend(struct sys_device *dev, pm_message_t state) 2449 + static int kvm_suspend(void) 2450 2450 { 2451 2451 if (kvm_usage_count) 2452 2452 hardware_disable_nolock(NULL); 2453 2453 return 0; 2454 2454 } 2455 2455 2456 - static int kvm_resume(struct sys_device *dev) 2456 + static void kvm_resume(void) 2457 2457 { 2458 2458 if (kvm_usage_count) { 2459 2459 WARN_ON(raw_spin_is_locked(&kvm_lock)); 2460 2460 hardware_enable_nolock(NULL); 2461 2461 } 2462 - return 0; 2463 2462 } 2464 2463 2465 - static struct sysdev_class kvm_sysdev_class = { 2466 - .name = "kvm", 2464 + static struct syscore_ops kvm_syscore_ops = { 2467 2465 .suspend = kvm_suspend, 2468 2466 .resume = kvm_resume, 2469 - }; 2470 - 2471 - static struct sys_device kvm_sysdev = { 2472 - .id = 0, 2473 - .cls = &kvm_sysdev_class, 2474 2467 }; 2475 2468 2476 2469 struct page *bad_page; ··· 2549 2556 goto out_free_2; 2550 2557 register_reboot_notifier(&kvm_reboot_notifier); 2551 2558 2552 - r = sysdev_class_register(&kvm_sysdev_class); 2553 - if (r) 2554 - goto out_free_3; 2555 - 2556 - r = sysdev_register(&kvm_sysdev); 2557 - if (r) 2558 - goto out_free_4; 2559 - 2560 2559 /* A kmem cache lets us meet the alignment requirements of fx_save. 
*/ 2561 2560 if (!vcpu_align) 2562 2561 vcpu_align = __alignof__(struct kvm_vcpu); ··· 2556 2571 0, NULL); 2557 2572 if (!kvm_vcpu_cache) { 2558 2573 r = -ENOMEM; 2559 - goto out_free_5; 2574 + goto out_free_3; 2560 2575 } 2561 2576 2562 2577 r = kvm_async_pf_init(); ··· 2573 2588 goto out_unreg; 2574 2589 } 2575 2590 2591 + register_syscore_ops(&kvm_syscore_ops); 2592 + 2576 2593 kvm_preempt_ops.sched_in = kvm_sched_in; 2577 2594 kvm_preempt_ops.sched_out = kvm_sched_out; 2578 2595 ··· 2586 2599 kvm_async_pf_deinit(); 2587 2600 out_free: 2588 2601 kmem_cache_destroy(kvm_vcpu_cache); 2589 - out_free_5: 2590 - sysdev_unregister(&kvm_sysdev); 2591 - out_free_4: 2592 - sysdev_class_unregister(&kvm_sysdev_class); 2593 2602 out_free_3: 2594 2603 unregister_reboot_notifier(&kvm_reboot_notifier); 2595 2604 unregister_cpu_notifier(&kvm_cpu_notifier); ··· 2613 2630 misc_deregister(&kvm_dev); 2614 2631 kmem_cache_destroy(kvm_vcpu_cache); 2615 2632 kvm_async_pf_deinit(); 2616 - sysdev_unregister(&kvm_sysdev); 2617 - sysdev_class_unregister(&kvm_sysdev_class); 2633 + unregister_syscore_ops(&kvm_syscore_ops); 2618 2634 unregister_reboot_notifier(&kvm_reboot_notifier); 2619 2635 unregister_cpu_notifier(&kvm_cpu_notifier); 2620 2636 on_each_cpu(hardware_disable_nolock, NULL, 1);