x86: Use syscore_ops instead of sysdev classes and sysdevs

Some subsystems in the x86 tree need to carry out suspend/resume and
shutdown operations with only one CPU on-line and interrupts disabled,
and they define sysdev classes and sysdevs (or sysdev drivers) for
this purpose. This leads to unnecessarily complicated code and
excessive memory usage, so switch them to using struct syscore_ops
objects instead.

Generally, the subsystems that use sysdevs for implementing PM
operations fall into three categories:

 (1) subsystems whose suspend/resume callbacks ignore their arguments
     entirely (the majority),

 (2) subsystems whose suspend/resume callbacks use their struct
     sys_device argument, but don't really need to do that, because
     they can be implemented differently in an arguably simpler way
     (io_apic.c), and

 (3) subsystems whose suspend/resume callbacks use their struct
     sys_device argument, but the value of that argument is always
     the same and could be ignored (microcode_core.c).

In all of these cases the subsystems in question may be readily
converted to using struct syscore_ops objects for power management
and shutdown.
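
The same conversion pattern is applied to every subsystem touched
below. As a rough sketch, using a made-up "foo" subsystem (the foo_*
names are hypothetical; the syscore_ops interface is the one used
throughout this patch):

	#include <linux/init.h>
	#include <linux/syscore_ops.h>

	/* Runs late in suspend, with only the boot CPU on-line and
	 * interrupts disabled; returns 0 on success. */
	static int foo_suspend(void)
	{
		/* save hardware state */
		return 0;
	}

	/* Runs early in resume, in the same environment; cannot fail. */
	static void foo_resume(void)
	{
		/* restore hardware state */
	}

	static struct syscore_ops foo_syscore_ops = {
		.suspend	= foo_suspend,
		.resume		= foo_resume,
		/* .shutdown may be set too, as mce.c and i8259.c do */
	};

	static int __init foo_init_ops(void)
	{
		/* replaces sysdev_class_register() + sysdev_register() */
		register_syscore_ops(&foo_syscore_ops);
		return 0;
	}
	device_initcall(foo_init_ops);

Since the callbacks hang directly off the syscore_ops object, no
per-subsystem sysdev class, sys_device object or associated sysfs
machinery is needed any more.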

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>

+133 -237
+6 -20
arch/x86/kernel/amd_iommu_init.c
···
 #include <linux/acpi.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/interrupt.h>
 #include <linux/msi.h>
 #include <asm/pci-direct.h>
···
  * disable suspend until real resume implemented
  */
 
-static int amd_iommu_resume(struct sys_device *dev)
+static void amd_iommu_resume(void)
 {
 	struct amd_iommu *iommu;
 
···
  */
 	amd_iommu_flush_all_devices();
 	amd_iommu_flush_all_domains();
-
-	return 0;
 }
 
-static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
+static int amd_iommu_suspend(void)
 {
 	/* disable IOMMUs to go out of the way for BIOS */
 	disable_iommus();
···
 	return 0;
 }
 
-static struct sysdev_class amd_iommu_sysdev_class = {
-	.name = "amd_iommu",
+static struct syscore_ops amd_iommu_syscore_ops = {
 	.suspend = amd_iommu_suspend,
 	.resume = amd_iommu_resume,
-};
-
-static struct sys_device device_amd_iommu = {
-	.id = 0,
-	.cls = &amd_iommu_sysdev_class,
 };
 
 /*
···
 		goto free;
 	}
 
-	ret = sysdev_class_register(&amd_iommu_sysdev_class);
-	if (ret)
-		goto free;
-
-	ret = sysdev_register(&device_amd_iommu);
-	if (ret)
-		goto free;
-
 	ret = amd_iommu_init_devices();
 	if (ret)
 		goto free;
···
 	amd_iommu_init_api();
 
 	amd_iommu_init_notifier();
+
+	register_syscore_ops(&amd_iommu_syscore_ops);
 
 	if (iommu_pass_through)
 		goto out;
+9 -24
arch/x86/kernel/apic/apic.c
···
 #include <linux/ftrace.h>
 #include <linux/ioport.h>
 #include <linux/module.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/delay.h>
 #include <linux/timex.h>
 #include <linux/dmar.h>
···
 	unsigned int apic_thmr;
 } apic_pm_state;
 
-static int lapic_suspend(struct sys_device *dev, pm_message_t state)
+static int lapic_suspend(void)
 {
 	unsigned long flags;
 	int maxlvt;
···
 	return 0;
 }
 
-static int lapic_resume(struct sys_device *dev)
+static void lapic_resume(void)
 {
 	unsigned int l, h;
 	unsigned long flags;
-	int maxlvt;
-	int ret = 0;
+	int maxlvt, ret;
 	struct IO_APIC_route_entry **ioapic_entries = NULL;
 
 	if (!apic_pm_state.active)
-		return 0;
+		return;
 
 	local_irq_save(flags);
 	if (intr_remapping_enabled) {
 		ioapic_entries = alloc_ioapic_entries();
 		if (!ioapic_entries) {
 			WARN(1, "Alloc ioapic_entries in lapic resume failed.");
-			ret = -ENOMEM;
 			goto restore;
 		}
 
···
 	}
 restore:
 	local_irq_restore(flags);
-
-	return ret;
 }
 
 /*
···
  * are needed on every CPU up until machine_halt/restart/poweroff.
  */
 
-static struct sysdev_class lapic_sysclass = {
-	.name = "lapic",
+static struct syscore_ops lapic_syscore_ops = {
 	.resume = lapic_resume,
 	.suspend = lapic_suspend,
-};
-
-static struct sys_device device_lapic = {
-	.id = 0,
-	.cls = &lapic_sysclass,
 };
 
 static void __cpuinit apic_pm_activate(void)
···
 
 static int __init init_lapic_sysfs(void)
 {
-	int error;
-
-	if (!cpu_has_apic)
-		return 0;
 	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
+	if (cpu_has_apic)
+		register_syscore_ops(&lapic_syscore_ops);
 
-	error = sysdev_class_register(&lapic_sysclass);
-	if (!error)
-		error = sysdev_register(&device_lapic);
-	return error;
+	return 0;
 }
 
 /* local apic needs to resume before other devices access its registers. */
+47 -52
arch/x86/kernel/apic/io_apic.c
···
 #include <linux/compiler.h>
 #include <linux/acpi.h>
 #include <linux/module.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/msi.h>
 #include <linux/htirq.h>
 #include <linux/freezer.h>
···
 
 late_initcall(io_apic_bug_finalize);
 
-struct sysfs_ioapic_data {
-	struct sys_device dev;
-	struct IO_APIC_route_entry entry[0];
-};
-static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
+static struct IO_APIC_route_entry *ioapic_saved_data[MAX_IO_APICS];
 
-static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
+static void suspend_ioapic(int ioapic_id)
 {
-	struct IO_APIC_route_entry *entry;
-	struct sysfs_ioapic_data *data;
+	struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id];
 	int i;
 
-	data = container_of(dev, struct sysfs_ioapic_data, dev);
-	entry = data->entry;
-	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
-		*entry = ioapic_read_entry(dev->id, i);
+	if (!saved_data)
+		return;
+
+	for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++)
+		saved_data[i] = ioapic_read_entry(ioapic_id, i);
+}
+
+static int ioapic_suspend(void)
+{
+	int ioapic_id;
+
+	for (ioapic_id = 0; ioapic_id < nr_ioapics; ioapic_id++)
+		suspend_ioapic(ioapic_id);
 
 	return 0;
 }
 
-static int ioapic_resume(struct sys_device *dev)
+static void resume_ioapic(int ioapic_id)
 {
-	struct IO_APIC_route_entry *entry;
-	struct sysfs_ioapic_data *data;
+	struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id];
 	unsigned long flags;
 	union IO_APIC_reg_00 reg_00;
 	int i;
 
-	data = container_of(dev, struct sysfs_ioapic_data, dev);
-	entry = data->entry;
+	if (!saved_data)
+		return;
 
 	raw_spin_lock_irqsave(&ioapic_lock, flags);
-	reg_00.raw = io_apic_read(dev->id, 0);
-	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
-		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
-		io_apic_write(dev->id, 0, reg_00.raw);
+	reg_00.raw = io_apic_read(ioapic_id, 0);
+	if (reg_00.bits.ID != mp_ioapics[ioapic_id].apicid) {
+		reg_00.bits.ID = mp_ioapics[ioapic_id].apicid;
+		io_apic_write(ioapic_id, 0, reg_00.raw);
 	}
 	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
-		ioapic_write_entry(dev->id, i, entry[i]);
-
-	return 0;
+	for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++)
+		ioapic_write_entry(ioapic_id, i, saved_data[i]);
 }
 
-static struct sysdev_class ioapic_sysdev_class = {
-	.name = "ioapic",
+static void ioapic_resume(void)
+{
+	int ioapic_id;
+
+	for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--)
+		resume_ioapic(ioapic_id);
+}
+
+static struct syscore_ops ioapic_syscore_ops = {
 	.suspend = ioapic_suspend,
 	.resume = ioapic_resume,
 };
 
-static int __init ioapic_init_sysfs(void)
+static int __init ioapic_init_ops(void)
 {
-	struct sys_device * dev;
-	int i, size, error;
+	int i;
 
-	error = sysdev_class_register(&ioapic_sysdev_class);
-	if (error)
-		return error;
+	for (i = 0; i < nr_ioapics; i++) {
+		unsigned int size;
 
-	for (i = 0; i < nr_ioapics; i++ ) {
-		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
+		size = nr_ioapic_registers[i]
 			* sizeof(struct IO_APIC_route_entry);
-		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
-		if (!mp_ioapic_data[i]) {
-			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-			continue;
-		}
-		dev = &mp_ioapic_data[i]->dev;
-		dev->id = i;
-		dev->cls = &ioapic_sysdev_class;
-		error = sysdev_register(dev);
-		if (error) {
-			kfree(mp_ioapic_data[i]);
-			mp_ioapic_data[i] = NULL;
-			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-			continue;
-		}
+		ioapic_saved_data[i] = kzalloc(size, GFP_KERNEL);
+		if (!ioapic_saved_data[i])
+			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
 	}
+
+	register_syscore_ops(&ioapic_syscore_ops);
 
 	return 0;
 }
 
-device_initcall(ioapic_init_sysfs);
+device_initcall(ioapic_init_ops);
 
 /*
  * Dynamic irq allocate and deallocation
+12 -9
arch/x86/kernel/cpu/mcheck/mce.c
···
 #include <linux/percpu.h>
 #include <linux/string.h>
 #include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/delay.h>
 #include <linux/ctype.h>
 #include <linux/sched.h>
···
 	return 0;
 }
 
-static int mce_suspend(struct sys_device *dev, pm_message_t state)
+static int mce_suspend(void)
 {
 	return mce_disable_error_reporting();
 }
 
-static int mce_shutdown(struct sys_device *dev)
+static void mce_shutdown(void)
 {
-	return mce_disable_error_reporting();
+	mce_disable_error_reporting();
 }
 
 /*
···
  * Only one CPU is active at this time, the others get re-added later using
  * CPU hotplug:
  */
-static int mce_resume(struct sys_device *dev)
+static void mce_resume(void)
 {
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
-
-	return 0;
 }
+
+static struct syscore_ops mce_syscore_ops = {
+	.suspend = mce_suspend,
+	.shutdown = mce_shutdown,
+	.resume = mce_resume,
+};
 
 static void mce_cpu_restart(void *data)
 {
···
 }
 
 static struct sysdev_class mce_sysclass = {
-	.suspend = mce_suspend,
-	.shutdown = mce_shutdown,
-	.resume = mce_resume,
 	.name = "machinecheck",
 };
 
···
 		return err;
 	}
 
+	register_syscore_ops(&mce_syscore_ops);
 	register_hotcpu_notifier(&mce_cpu_notifier);
 	misc_register(&mce_log_device);
 
+5 -5
arch/x86/kernel/cpu/mtrr/main.c
···
 #include <linux/cpu.h>
 #include <linux/pci.h>
 #include <linux/smp.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/processor.h>
 #include <asm/e820.h>
···
 
 static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
 
-static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
+static int mtrr_save(void)
 {
 	int i;
 
···
 	return 0;
 }
 
-static int mtrr_restore(struct sys_device *sysdev)
+static void mtrr_restore(void)
 {
 	int i;
 
···
 				    mtrr_value[i].ltype);
 		}
 	}
-	return 0;
 }
 
 
 
-static struct sysdev_driver mtrr_sysdev_driver = {
+static struct syscore_ops mtrr_syscore_ops = {
 	.suspend = mtrr_save,
 	.resume = mtrr_restore,
 };
···
 	 * TBD: is there any system with such CPU which supports
 	 * suspend/resume? If no, we should remove the code.
 	 */
-	sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
+	register_syscore_ops(&mtrr_syscore_ops);
 
 	return 0;
 }
+7 -23
arch/x86/kernel/i8237.c
···
  */
 
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/dma.h>
 
···
  * in asm/dma.h.
  */
 
-static int i8237A_resume(struct sys_device *dev)
+static void i8237A_resume(void)
 {
 	unsigned long flags;
 	int i;
···
 	enable_dma(4);
 
 	release_dma_lock(flags);
-
-	return 0;
 }
 
-static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
-{
-	return 0;
-}
-
-static struct sysdev_class i8237_sysdev_class = {
-	.name = "i8237",
-	.suspend = i8237A_suspend,
+static struct syscore_ops i8237_syscore_ops = {
 	.resume = i8237A_resume,
 };
 
-static struct sys_device device_i8237A = {
-	.id = 0,
-	.cls = &i8237_sysdev_class,
-};
-
-static int __init i8237A_init_sysfs(void)
+static int __init i8237A_init_ops(void)
 {
-	int error = sysdev_class_register(&i8237_sysdev_class);
-	if (!error)
-		error = sysdev_register(&device_i8237A);
-	return error;
+	register_syscore_ops(&i8237_syscore_ops);
+	return 0;
 }
-device_initcall(i8237A_init_sysfs);
+device_initcall(i8237A_init_ops);
+10 -23
arch/x86/kernel/i8259.c
···
 #include <linux/random.h>
 #include <linux/init.h>
 #include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/bitops.h>
 #include <linux/acpi.h>
 #include <linux/io.h>
···
 		trigger[1] = inb(0x4d1) & 0xDE;
 }
 
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
 {
 	init_8259A(i8259A_auto_eoi);
 	restore_ELCR(irq_trigger);
-	return 0;
 }
 
-static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
+static int i8259A_suspend(void)
 {
 	save_ELCR(irq_trigger);
 	return 0;
 }
 
-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
 {
 	/* Put the i8259A into a quiescent state that
 	 * the kernel initialization code can get it
···
 	 */
 	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
-	return 0;
 }
 
-static struct sysdev_class i8259_sysdev_class = {
-	.name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
 	.suspend = i8259A_suspend,
 	.resume = i8259A_resume,
 	.shutdown = i8259A_shutdown,
-};
-
-static struct sys_device device_i8259A = {
-	.id = 0,
-	.cls = &i8259_sysdev_class,
 };
 
 static void mask_8259A(void)
···
 
 struct legacy_pic *legacy_pic = &default_legacy_pic;
 
-static int __init i8259A_init_sysfs(void)
+static int __init i8259A_init_ops(void)
 {
-	int error;
+	if (legacy_pic == &default_legacy_pic)
+		register_syscore_ops(&i8259_syscore_ops);
 
-	if (legacy_pic != &default_legacy_pic)
-		return 0;
-
-	error = sysdev_class_register(&i8259_sysdev_class);
-	if (!error)
-		error = sysdev_register(&device_i8259A);
-	return error;
+	return 0;
 }
 
-device_initcall(i8259A_init_sysfs);
+device_initcall(i8259A_init_ops);
+18 -24
arch/x86/kernel/microcode_core.c
···
 #include <linux/cpu.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/microcode.h>
 #include <asm/processor.h>
···
 	return 0;
 }
 
-static int mc_sysdev_resume(struct sys_device *dev)
-{
-	int cpu = dev->id;
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
-	if (!cpu_online(cpu))
-		return 0;
-
-	/*
-	 * All non-bootup cpus are still disabled,
-	 * so only CPU 0 will apply ucode here.
-	 *
-	 * Moreover, there can be no concurrent
-	 * updates from any other places at this point.
-	 */
-	WARN_ON(cpu != 0);
-
-	if (uci->valid && uci->mc)
-		microcode_ops->apply_microcode(cpu);
-
-	return 0;
-}
-
 static struct sysdev_driver mc_sysdev_driver = {
 	.add = mc_sysdev_add,
 	.remove = mc_sysdev_remove,
-	.resume = mc_sysdev_resume,
+};
+
+/**
+ * mc_bp_resume - Update boot CPU microcode during resume.
+ */
+static void mc_bp_resume(void)
+{
+	int cpu = smp_processor_id();
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+
+	if (uci->valid && uci->mc)
+		microcode_ops->apply_microcode(cpu);
+}
+
+static struct syscore_ops mc_syscore_ops = {
+	.resume = mc_bp_resume,
 };
 
 static __cpuinit int
···
 	if (error)
 		return error;
 
+	register_syscore_ops(&mc_syscore_ops);
 	register_hotcpu_notifier(&mc_cpu_notifier);
 
 	pr_info("Microcode Update Driver: v" MICROCODE_VERSION
+7 -25
arch/x86/kernel/pci-gart_64.c
···
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/io.h>
 #include <linux/gfp.h>
 #include <asm/atomic.h>
···
 	aperture_alloc = aper_alloc;
 }
 
-static void gart_fixup_northbridges(struct sys_device *dev)
+static void gart_fixup_northbridges(void)
 {
 	int i;
 
···
 	}
 }
 
-static int gart_resume(struct sys_device *dev)
+static void gart_resume(void)
 {
 	pr_info("PCI-DMA: Resuming GART IOMMU\n");
 
-	gart_fixup_northbridges(dev);
+	gart_fixup_northbridges();
 
 	enable_gart_translations();
-
-	return 0;
 }
 
-static int gart_suspend(struct sys_device *dev, pm_message_t state)
-{
-	return 0;
-}
-
-static struct sysdev_class gart_sysdev_class = {
-	.name = "gart",
-	.suspend = gart_suspend,
+static struct syscore_ops gart_syscore_ops = {
 	.resume = gart_resume,
 
-};
-
-static struct sys_device device_gart = {
-	.cls = &gart_sysdev_class,
 };
 
 /*
···
 	unsigned aper_base, new_aper_base;
 	struct pci_dev *dev;
 	void *gatt;
-	int i, error;
+	int i;
 
 	pr_info("PCI-DMA: Disabling AGP.\n");
 
···
 
 	agp_gatt_table = gatt;
 
-	error = sysdev_class_register(&gart_sysdev_class);
-	if (!error)
-		error = sysdev_register(&device_gart);
-	if (error)
-		panic("Could not register gart_sysdev -- "
-			"would corrupt data on next suspend");
+	register_syscore_ops(&gart_syscore_ops);
 
 	flush_gart();
 
+12 -32
arch/x86/oprofile/nmi_int.c
···
 #include <linux/notifier.h>
 #include <linux/smp.h>
 #include <linux/oprofile.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/slab.h>
 #include <linux/moduleparam.h>
 #include <linux/kdebug.h>
···
 
 #ifdef CONFIG_PM
 
-static int nmi_suspend(struct sys_device *dev, pm_message_t state)
+static int nmi_suspend(void)
 {
 	/* Only one CPU left, just stop that one */
 	if (nmi_enabled == 1)
···
 	return 0;
 }
 
-static int nmi_resume(struct sys_device *dev)
+static void nmi_resume(void)
 {
 	if (nmi_enabled == 1)
 		nmi_cpu_start(NULL);
-	return 0;
 }
 
-static struct sysdev_class oprofile_sysclass = {
-	.name = "oprofile",
+static struct syscore_ops oprofile_syscore_ops = {
 	.resume = nmi_resume,
 	.suspend = nmi_suspend,
 };
 
-static struct sys_device device_oprofile = {
-	.id = 0,
-	.cls = &oprofile_sysclass,
-};
-
-static int __init init_sysfs(void)
+static void __init init_suspend_resume(void)
 {
-	int error;
-
-	error = sysdev_class_register(&oprofile_sysclass);
-	if (error)
-		return error;
-
-	error = sysdev_register(&device_oprofile);
-	if (error)
-		sysdev_class_unregister(&oprofile_sysclass);
-
-	return error;
+	register_syscore_ops(&oprofile_syscore_ops);
 }
 
-static void exit_sysfs(void)
+static void exit_suspend_resume(void)
 {
-	sysdev_unregister(&device_oprofile);
-	sysdev_class_unregister(&oprofile_sysclass);
+	unregister_syscore_ops(&oprofile_syscore_ops);
 }
 
 #else
 
-static inline int init_sysfs(void) { return 0; }
-static inline void exit_sysfs(void) { }
+static inline void init_suspend_resume(void) { }
+static inline void exit_suspend_resume(void) { }
 
 #endif /* CONFIG_PM */
 
···
 
 	mux_init(ops);
 
-	ret = init_sysfs();
-	if (ret)
-		return ret;
+	init_suspend_resume();
 
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
···
 
 void op_nmi_exit(void)
 {
-	exit_sysfs();
+	exit_suspend_resume();
 }