x86: Use syscore_ops instead of sysdev classes and sysdevs

Some subsystems in the x86 tree need to carry out suspend/resume and
shutdown operations with one CPU on-line and interrupts disabled and
they define sysdev classes and sysdevs or sysdev drivers for this
purpose. This leads to unnecessarily complicated code and excessive
memory usage, so switch them to using struct syscore_ops objects for
this purpose instead.

Generally, there are three categories of subsystems that use
sysdevs for implementing PM operations: (1) subsystems whose
suspend/resume callbacks ignore their arguments entirely (the
majority), (2) subsystems whose suspend/resume callbacks use their
struct sys_device argument, but don't really need to do that,
because they can be implemented differently in an arguably simpler
way (io_apic.c), and (3) subsystems whose suspend/resume callbacks
use their struct sys_device argument, but the value of that argument
is always the same and could be ignored (microcode_core.c). In all
of these cases the subsystems in question may be readily converted to
using struct syscore_ops objects for power management and shutdown.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>

+133 -237
+6 -20
arch/x86/kernel/amd_iommu_init.c
··· 21 #include <linux/acpi.h> 22 #include <linux/list.h> 23 #include <linux/slab.h> 24 - #include <linux/sysdev.h> 25 #include <linux/interrupt.h> 26 #include <linux/msi.h> 27 #include <asm/pci-direct.h> ··· 1260 * disable suspend until real resume implemented 1261 */ 1262 1263 - static int amd_iommu_resume(struct sys_device *dev) 1264 { 1265 struct amd_iommu *iommu; 1266 ··· 1276 */ 1277 amd_iommu_flush_all_devices(); 1278 amd_iommu_flush_all_domains(); 1279 - 1280 - return 0; 1281 } 1282 1283 - static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state) 1284 { 1285 /* disable IOMMUs to go out of the way for BIOS */ 1286 disable_iommus(); ··· 1286 return 0; 1287 } 1288 1289 - static struct sysdev_class amd_iommu_sysdev_class = { 1290 - .name = "amd_iommu", 1291 .suspend = amd_iommu_suspend, 1292 .resume = amd_iommu_resume, 1293 - }; 1294 - 1295 - static struct sys_device device_amd_iommu = { 1296 - .id = 0, 1297 - .cls = &amd_iommu_sysdev_class, 1298 }; 1299 1300 /* ··· 1407 goto free; 1408 } 1409 1410 - ret = sysdev_class_register(&amd_iommu_sysdev_class); 1411 - if (ret) 1412 - goto free; 1413 - 1414 - ret = sysdev_register(&device_amd_iommu); 1415 - if (ret) 1416 - goto free; 1417 - 1418 ret = amd_iommu_init_devices(); 1419 if (ret) 1420 goto free; ··· 1424 amd_iommu_init_api(); 1425 1426 amd_iommu_init_notifier(); 1427 1428 if (iommu_pass_through) 1429 goto out;
··· 21 #include <linux/acpi.h> 22 #include <linux/list.h> 23 #include <linux/slab.h> 24 + #include <linux/syscore_ops.h> 25 #include <linux/interrupt.h> 26 #include <linux/msi.h> 27 #include <asm/pci-direct.h> ··· 1260 * disable suspend until real resume implemented 1261 */ 1262 1263 + static void amd_iommu_resume(void) 1264 { 1265 struct amd_iommu *iommu; 1266 ··· 1276 */ 1277 amd_iommu_flush_all_devices(); 1278 amd_iommu_flush_all_domains(); 1279 } 1280 1281 + static int amd_iommu_suspend(void) 1282 { 1283 /* disable IOMMUs to go out of the way for BIOS */ 1284 disable_iommus(); ··· 1288 return 0; 1289 } 1290 1291 + static struct syscore_ops amd_iommu_syscore_ops = { 1292 .suspend = amd_iommu_suspend, 1293 .resume = amd_iommu_resume, 1294 }; 1295 1296 /* ··· 1415 goto free; 1416 } 1417 1418 ret = amd_iommu_init_devices(); 1419 if (ret) 1420 goto free; ··· 1440 amd_iommu_init_api(); 1441 1442 amd_iommu_init_notifier(); 1443 + 1444 + register_syscore_ops(&amd_iommu_syscore_ops); 1445 1446 if (iommu_pass_through) 1447 goto out;
+9 -24
arch/x86/kernel/apic/apic.c
··· 24 #include <linux/ftrace.h> 25 #include <linux/ioport.h> 26 #include <linux/module.h> 27 - #include <linux/sysdev.h> 28 #include <linux/delay.h> 29 #include <linux/timex.h> 30 #include <linux/dmar.h> ··· 2046 unsigned int apic_thmr; 2047 } apic_pm_state; 2048 2049 - static int lapic_suspend(struct sys_device *dev, pm_message_t state) 2050 { 2051 unsigned long flags; 2052 int maxlvt; ··· 2084 return 0; 2085 } 2086 2087 - static int lapic_resume(struct sys_device *dev) 2088 { 2089 unsigned int l, h; 2090 unsigned long flags; 2091 - int maxlvt; 2092 - int ret = 0; 2093 struct IO_APIC_route_entry **ioapic_entries = NULL; 2094 2095 if (!apic_pm_state.active) 2096 - return 0; 2097 2098 local_irq_save(flags); 2099 if (intr_remapping_enabled) { 2100 ioapic_entries = alloc_ioapic_entries(); 2101 if (!ioapic_entries) { 2102 WARN(1, "Alloc ioapic_entries in lapic resume failed."); 2103 - ret = -ENOMEM; 2104 goto restore; 2105 } 2106 ··· 2160 } 2161 restore: 2162 local_irq_restore(flags); 2163 - 2164 - return ret; 2165 } 2166 2167 /* ··· 2167 * are needed on every CPU up until machine_halt/restart/poweroff. 2168 */ 2169 2170 - static struct sysdev_class lapic_sysclass = { 2171 - .name = "lapic", 2172 .resume = lapic_resume, 2173 .suspend = lapic_suspend, 2174 - }; 2175 - 2176 - static struct sys_device device_lapic = { 2177 - .id = 0, 2178 - .cls = &lapic_sysclass, 2179 }; 2180 2181 static void __cpuinit apic_pm_activate(void) ··· 2179 2180 static int __init init_lapic_sysfs(void) 2181 { 2182 - int error; 2183 - 2184 - if (!cpu_has_apic) 2185 - return 0; 2186 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ 2187 2188 - error = sysdev_class_register(&lapic_sysclass); 2189 - if (!error) 2190 - error = sysdev_register(&device_lapic); 2191 - return error; 2192 } 2193 2194 /* local apic needs to resume before other devices access its registers. */
··· 24 #include <linux/ftrace.h> 25 #include <linux/ioport.h> 26 #include <linux/module.h> 27 + #include <linux/syscore_ops.h> 28 #include <linux/delay.h> 29 #include <linux/timex.h> 30 #include <linux/dmar.h> ··· 2046 unsigned int apic_thmr; 2047 } apic_pm_state; 2048 2049 + static int lapic_suspend(void) 2050 { 2051 unsigned long flags; 2052 int maxlvt; ··· 2084 return 0; 2085 } 2086 2087 + static void lapic_resume(void) 2088 { 2089 unsigned int l, h; 2090 unsigned long flags; 2091 + int maxlvt, ret; 2092 struct IO_APIC_route_entry **ioapic_entries = NULL; 2093 2094 if (!apic_pm_state.active) 2095 + return; 2096 2097 local_irq_save(flags); 2098 if (intr_remapping_enabled) { 2099 ioapic_entries = alloc_ioapic_entries(); 2100 if (!ioapic_entries) { 2101 WARN(1, "Alloc ioapic_entries in lapic resume failed."); 2102 goto restore; 2103 } 2104 ··· 2162 } 2163 restore: 2164 local_irq_restore(flags); 2165 } 2166 2167 /* ··· 2171 * are needed on every CPU up until machine_halt/restart/poweroff. 2172 */ 2173 2174 + static struct syscore_ops lapic_syscore_ops = { 2175 .resume = lapic_resume, 2176 .suspend = lapic_suspend, 2177 }; 2178 2179 static void __cpuinit apic_pm_activate(void) ··· 2189 2190 static int __init init_lapic_sysfs(void) 2191 { 2192 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ 2193 + if (cpu_has_apic) 2194 + register_syscore_ops(&lapic_syscore_ops); 2195 2196 + return 0; 2197 } 2198 2199 /* local apic needs to resume before other devices access its registers. */
+47 -52
arch/x86/kernel/apic/io_apic.c
··· 30 #include <linux/compiler.h> 31 #include <linux/acpi.h> 32 #include <linux/module.h> 33 - #include <linux/sysdev.h> 34 #include <linux/msi.h> 35 #include <linux/htirq.h> 36 #include <linux/freezer.h> ··· 2918 2919 late_initcall(io_apic_bug_finalize); 2920 2921 - struct sysfs_ioapic_data { 2922 - struct sys_device dev; 2923 - struct IO_APIC_route_entry entry[0]; 2924 - }; 2925 - static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; 2926 2927 - static int ioapic_suspend(struct sys_device *dev, pm_message_t state) 2928 { 2929 - struct IO_APIC_route_entry *entry; 2930 - struct sysfs_ioapic_data *data; 2931 int i; 2932 2933 - data = container_of(dev, struct sysfs_ioapic_data, dev); 2934 - entry = data->entry; 2935 - for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) 2936 - *entry = ioapic_read_entry(dev->id, i); 2937 2938 return 0; 2939 } 2940 2941 - static int ioapic_resume(struct sys_device *dev) 2942 { 2943 - struct IO_APIC_route_entry *entry; 2944 - struct sysfs_ioapic_data *data; 2945 unsigned long flags; 2946 union IO_APIC_reg_00 reg_00; 2947 int i; 2948 2949 - data = container_of(dev, struct sysfs_ioapic_data, dev); 2950 - entry = data->entry; 2951 2952 raw_spin_lock_irqsave(&ioapic_lock, flags); 2953 - reg_00.raw = io_apic_read(dev->id, 0); 2954 - if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { 2955 - reg_00.bits.ID = mp_ioapics[dev->id].apicid; 2956 - io_apic_write(dev->id, 0, reg_00.raw); 2957 } 2958 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2959 - for (i = 0; i < nr_ioapic_registers[dev->id]; i++) 2960 - ioapic_write_entry(dev->id, i, entry[i]); 2961 - 2962 - return 0; 2963 } 2964 2965 - static struct sysdev_class ioapic_sysdev_class = { 2966 - .name = "ioapic", 2967 .suspend = ioapic_suspend, 2968 .resume = ioapic_resume, 2969 }; 2970 2971 - static int __init ioapic_init_sysfs(void) 2972 { 2973 - struct sys_device * dev; 2974 - int i, size, error; 2975 2976 - error = sysdev_class_register(&ioapic_sysdev_class); 2977 - if (error) 2978 - return error; 2979 2980 - for (i = 0; i < nr_ioapics; i++ ) { 2981 - size = sizeof(struct sys_device) + nr_ioapic_registers[i] 2982 * sizeof(struct IO_APIC_route_entry); 2983 - mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); 2984 - if (!mp_ioapic_data[i]) { 2985 - printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); 2986 - continue; 2987 - } 2988 - dev = &mp_ioapic_data[i]->dev; 2989 - dev->id = i; 2990 - dev->cls = &ioapic_sysdev_class; 2991 - error = sysdev_register(dev); 2992 - if (error) { 2993 - kfree(mp_ioapic_data[i]); 2994 - mp_ioapic_data[i] = NULL; 2995 - printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); 2996 - continue; 2997 - } 2998 } 2999 3000 return 0; 3001 } 3002 3003 - device_initcall(ioapic_init_sysfs); 3004 3005 /* 3006 * Dynamic irq allocate and deallocation
··· 30 #include <linux/compiler.h> 31 #include <linux/acpi.h> 32 #include <linux/module.h> 33 + #include <linux/syscore_ops.h> 34 #include <linux/msi.h> 35 #include <linux/htirq.h> 36 #include <linux/freezer.h> ··· 2918 2919 late_initcall(io_apic_bug_finalize); 2920 2921 + static struct IO_APIC_route_entry *ioapic_saved_data[MAX_IO_APICS]; 2922 2923 + static void suspend_ioapic(int ioapic_id) 2924 { 2925 + struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id]; 2926 int i; 2927 2928 + if (!saved_data) 2929 + return; 2930 + 2931 + for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++) 2932 + saved_data[i] = ioapic_read_entry(ioapic_id, i); 2933 + } 2934 + 2935 + static int ioapic_suspend(void) 2936 + { 2937 + int ioapic_id; 2938 + 2939 + for (ioapic_id = 0; ioapic_id < nr_ioapics; ioapic_id++) 2940 + suspend_ioapic(ioapic_id); 2941 2942 return 0; 2943 } 2944 2945 + static void resume_ioapic(int ioapic_id) 2946 { 2947 + struct IO_APIC_route_entry *saved_data = ioapic_saved_data[ioapic_id]; 2948 unsigned long flags; 2949 union IO_APIC_reg_00 reg_00; 2950 int i; 2951 2952 + if (!saved_data) 2953 + return; 2954 2955 raw_spin_lock_irqsave(&ioapic_lock, flags); 2956 + reg_00.raw = io_apic_read(ioapic_id, 0); 2957 + if (reg_00.bits.ID != mp_ioapics[ioapic_id].apicid) { 2958 + reg_00.bits.ID = mp_ioapics[ioapic_id].apicid; 2959 + io_apic_write(ioapic_id, 0, reg_00.raw); 2960 } 2961 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2962 + for (i = 0; i < nr_ioapic_registers[ioapic_id]; i++) 2963 + ioapic_write_entry(ioapic_id, i, saved_data[i]); 2964 } 2965 2966 + static void ioapic_resume(void) 2967 + { 2968 + int ioapic_id; 2969 + 2970 + for (ioapic_id = nr_ioapics - 1; ioapic_id >= 0; ioapic_id--) 2971 + resume_ioapic(ioapic_id); 2972 + } 2973 + 2974 + static struct syscore_ops ioapic_syscore_ops = { 2975 .suspend = ioapic_suspend, 2976 .resume = ioapic_resume, 2977 }; 2978 2979 + static int __init ioapic_init_ops(void) 2980 { 2981 + int i; 2982 2983 + for (i = 0; i < nr_ioapics; i++) { 2984 + unsigned int size; 2985 2986 + size = nr_ioapic_registers[i] 2987 * sizeof(struct IO_APIC_route_entry); 2988 + ioapic_saved_data[i] = kzalloc(size, GFP_KERNEL); 2989 + if (!ioapic_saved_data[i]) 2990 + pr_err("IOAPIC %d: suspend/resume impossible!\n", i); 2991 } 2992 + 2993 + register_syscore_ops(&ioapic_syscore_ops); 2994 2995 return 0; 2996 } 2997 2998 + device_initcall(ioapic_init_ops); 2999 3000 /* 3001 * Dynamic irq allocate and deallocation
+12 -9
arch/x86/kernel/cpu/mcheck/mce.c
··· 21 #include <linux/percpu.h> 22 #include <linux/string.h> 23 #include <linux/sysdev.h> 24 #include <linux/delay.h> 25 #include <linux/ctype.h> 26 #include <linux/sched.h> ··· 1750 return 0; 1751 } 1752 1753 - static int mce_suspend(struct sys_device *dev, pm_message_t state) 1754 { 1755 return mce_disable_error_reporting(); 1756 } 1757 1758 - static int mce_shutdown(struct sys_device *dev) 1759 { 1760 - return mce_disable_error_reporting(); 1761 } 1762 1763 /* ··· 1765 * Only one CPU is active at this time, the others get re-added later using 1766 * CPU hotplug: 1767 */ 1768 - static int mce_resume(struct sys_device *dev) 1769 { 1770 __mcheck_cpu_init_generic(); 1771 __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); 1772 - 1773 - return 0; 1774 } 1775 1776 static void mce_cpu_restart(void *data) 1777 { ··· 1813 } 1814 1815 static struct sysdev_class mce_sysclass = { 1816 - .suspend = mce_suspend, 1817 - .shutdown = mce_shutdown, 1818 - .resume = mce_resume, 1819 .name = "machinecheck", 1820 }; 1821 ··· 2141 return err; 2142 } 2143 2144 register_hotcpu_notifier(&mce_cpu_notifier); 2145 misc_register(&mce_log_device); 2146
··· 21 #include <linux/percpu.h> 22 #include <linux/string.h> 23 #include <linux/sysdev.h> 24 + #include <linux/syscore_ops.h> 25 #include <linux/delay.h> 26 #include <linux/ctype.h> 27 #include <linux/sched.h> ··· 1749 return 0; 1750 } 1751 1752 + static int mce_suspend(void) 1753 { 1754 return mce_disable_error_reporting(); 1755 } 1756 1757 + static void mce_shutdown(void) 1758 { 1759 + mce_disable_error_reporting(); 1760 } 1761 1762 /* ··· 1764 * Only one CPU is active at this time, the others get re-added later using 1765 * CPU hotplug: 1766 */ 1767 + static void mce_resume(void) 1768 { 1769 __mcheck_cpu_init_generic(); 1770 __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info)); 1771 } 1772 + 1773 + static struct syscore_ops mce_syscore_ops = { 1774 + .suspend = mce_suspend, 1775 + .shutdown = mce_shutdown, 1776 + .resume = mce_resume, 1777 + }; 1778 1779 static void mce_cpu_restart(void *data) 1780 { ··· 1808 } 1809 1810 static struct sysdev_class mce_sysclass = { 1811 .name = "machinecheck", 1812 }; 1813 ··· 2139 return err; 2140 } 2141 2142 + register_syscore_ops(&mce_syscore_ops); 2143 register_hotcpu_notifier(&mce_cpu_notifier); 2144 misc_register(&mce_log_device); 2145
+5 -5
arch/x86/kernel/cpu/mtrr/main.c
··· 45 #include <linux/cpu.h> 46 #include <linux/pci.h> 47 #include <linux/smp.h> 48 49 #include <asm/processor.h> 50 #include <asm/e820.h> ··· 631 632 static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; 633 634 - static int mtrr_save(struct sys_device *sysdev, pm_message_t state) 635 { 636 int i; 637 ··· 643 return 0; 644 } 645 646 - static int mtrr_restore(struct sys_device *sysdev) 647 { 648 int i; 649 ··· 654 mtrr_value[i].ltype); 655 } 656 } 657 - return 0; 658 } 659 660 661 662 - static struct sysdev_driver mtrr_sysdev_driver = { 663 .suspend = mtrr_save, 664 .resume = mtrr_restore, 665 }; ··· 839 * TBD: is there any system with such CPU which supports 840 * suspend/resume? If no, we should remove the code. 841 */ 842 - sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver); 843 844 return 0; 845 }
··· 45 #include <linux/cpu.h> 46 #include <linux/pci.h> 47 #include <linux/smp.h> 48 + #include <linux/syscore_ops.h> 49 50 #include <asm/processor.h> 51 #include <asm/e820.h> ··· 630 631 static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; 632 633 + static int mtrr_save(void) 634 { 635 int i; 636 ··· 642 return 0; 643 } 644 645 + static void mtrr_restore(void) 646 { 647 int i; 648 ··· 653 mtrr_value[i].ltype); 654 } 655 } 656 } 657 658 659 660 + static struct syscore_ops mtrr_syscore_ops = { 661 .suspend = mtrr_save, 662 .resume = mtrr_restore, 663 }; ··· 839 * TBD: is there any system with such CPU which supports 840 * suspend/resume? If no, we should remove the code. 841 */ 842 + register_syscore_ops(&mtrr_syscore_ops); 843 844 return 0; 845 }
+7 -23
arch/x86/kernel/i8237.c
··· 10 */ 11 12 #include <linux/init.h> 13 - #include <linux/sysdev.h> 14 15 #include <asm/dma.h> 16 ··· 21 * in asm/dma.h. 22 */ 23 24 - static int i8237A_resume(struct sys_device *dev) 25 { 26 unsigned long flags; 27 int i; ··· 41 enable_dma(4); 42 43 release_dma_lock(flags); 44 - 45 - return 0; 46 } 47 48 - static int i8237A_suspend(struct sys_device *dev, pm_message_t state) 49 - { 50 - return 0; 51 - } 52 - 53 - static struct sysdev_class i8237_sysdev_class = { 54 - .name = "i8237", 55 - .suspend = i8237A_suspend, 56 .resume = i8237A_resume, 57 }; 58 59 - static struct sys_device device_i8237A = { 60 - .id = 0, 61 - .cls = &i8237_sysdev_class, 62 - }; 63 - 64 - static int __init i8237A_init_sysfs(void) 65 { 66 - int error = sysdev_class_register(&i8237_sysdev_class); 67 - if (!error) 68 - error = sysdev_register(&device_i8237A); 69 - return error; 70 } 71 - device_initcall(i8237A_init_sysfs);
··· 10 */ 11 12 #include <linux/init.h> 13 + #include <linux/syscore_ops.h> 14 15 #include <asm/dma.h> 16 ··· 21 * in asm/dma.h. 22 */ 23 24 + static void i8237A_resume(void) 25 { 26 unsigned long flags; 27 int i; ··· 41 enable_dma(4); 42 43 release_dma_lock(flags); 44 } 45 46 + static struct syscore_ops i8237_syscore_ops = { 47 .resume = i8237A_resume, 48 }; 49 50 + static int __init i8237A_init_ops(void) 51 { 52 + register_syscore_ops(&i8237_syscore_ops); 53 + return 0; 54 } 55 + device_initcall(i8237A_init_ops);
+10 -23
arch/x86/kernel/i8259.c
··· 8 #include <linux/random.h> 9 #include <linux/init.h> 10 #include <linux/kernel_stat.h> 11 - #include <linux/sysdev.h> 12 #include <linux/bitops.h> 13 #include <linux/acpi.h> 14 #include <linux/io.h> ··· 245 trigger[1] = inb(0x4d1) & 0xDE; 246 } 247 248 - static int i8259A_resume(struct sys_device *dev) 249 { 250 init_8259A(i8259A_auto_eoi); 251 restore_ELCR(irq_trigger); 252 - return 0; 253 } 254 255 - static int i8259A_suspend(struct sys_device *dev, pm_message_t state) 256 { 257 save_ELCR(irq_trigger); 258 return 0; 259 } 260 261 - static int i8259A_shutdown(struct sys_device *dev) 262 { 263 /* Put the i8259A into a quiescent state that 264 * the kernel initialization code can get it ··· 265 */ 266 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 267 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ 268 - return 0; 269 } 270 271 - static struct sysdev_class i8259_sysdev_class = { 272 - .name = "i8259", 273 .suspend = i8259A_suspend, 274 .resume = i8259A_resume, 275 .shutdown = i8259A_shutdown, 276 - }; 277 - 278 - static struct sys_device device_i8259A = { 279 - .id = 0, 280 - .cls = &i8259_sysdev_class, 281 }; 282 283 static void mask_8259A(void) ··· 391 392 struct legacy_pic *legacy_pic = &default_legacy_pic; 393 394 - static int __init i8259A_init_sysfs(void) 395 { 396 - int error; 397 398 - if (legacy_pic != &default_legacy_pic) 399 - return 0; 400 - 401 - error = sysdev_class_register(&i8259_sysdev_class); 402 - if (!error) 403 - error = sysdev_register(&device_i8259A); 404 - return error; 405 } 406 407 - device_initcall(i8259A_init_sysfs);
··· 8 #include <linux/random.h> 9 #include <linux/init.h> 10 #include <linux/kernel_stat.h> 11 + #include <linux/syscore_ops.h> 12 #include <linux/bitops.h> 13 #include <linux/acpi.h> 14 #include <linux/io.h> ··· 245 trigger[1] = inb(0x4d1) & 0xDE; 246 } 247 248 + static void i8259A_resume(void) 249 { 250 init_8259A(i8259A_auto_eoi); 251 restore_ELCR(irq_trigger); 252 } 253 254 + static int i8259A_suspend(void) 255 { 256 save_ELCR(irq_trigger); 257 return 0; 258 } 259 260 + static void i8259A_shutdown(void) 261 { 262 /* Put the i8259A into a quiescent state that 263 * the kernel initialization code can get it ··· 266 */ 267 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 268 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ 269 } 270 271 + static struct syscore_ops i8259_syscore_ops = { 272 .suspend = i8259A_suspend, 273 .resume = i8259A_resume, 274 .shutdown = i8259A_shutdown, 275 }; 276 277 static void mask_8259A(void) ··· 399 400 struct legacy_pic *legacy_pic = &default_legacy_pic; 401 402 + static int __init i8259A_init_ops(void) 403 { 404 + if (legacy_pic == &default_legacy_pic) 405 + register_syscore_ops(&i8259_syscore_ops); 406 407 + return 0; 408 } 409 410 + device_initcall(i8259A_init_ops);
+18 -24
arch/x86/kernel/microcode_core.c
··· 82 #include <linux/cpu.h> 83 #include <linux/fs.h> 84 #include <linux/mm.h> 85 86 #include <asm/microcode.h> 87 #include <asm/processor.h> ··· 439 return 0; 440 } 441 442 - static int mc_sysdev_resume(struct sys_device *dev) 443 - { 444 - int cpu = dev->id; 445 - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 446 - 447 - if (!cpu_online(cpu)) 448 - return 0; 449 - 450 - /* 451 - * All non-bootup cpus are still disabled, 452 - * so only CPU 0 will apply ucode here. 453 - * 454 - * Moreover, there can be no concurrent 455 - * updates from any other places at this point. 456 - */ 457 - WARN_ON(cpu != 0); 458 - 459 - if (uci->valid && uci->mc) 460 - microcode_ops->apply_microcode(cpu); 461 - 462 - return 0; 463 - } 464 - 465 static struct sysdev_driver mc_sysdev_driver = { 466 .add = mc_sysdev_add, 467 .remove = mc_sysdev_remove, 468 - .resume = mc_sysdev_resume, 469 }; 470 471 static __cpuinit int ··· 535 if (error) 536 return error; 537 538 register_hotcpu_notifier(&mc_cpu_notifier); 539 540 pr_info("Microcode Update Driver: v" MICROCODE_VERSION
··· 82 #include <linux/cpu.h> 83 #include <linux/fs.h> 84 #include <linux/mm.h> 85 + #include <linux/syscore_ops.h> 86 87 #include <asm/microcode.h> 88 #include <asm/processor.h> ··· 438 return 0; 439 } 440 441 static struct sysdev_driver mc_sysdev_driver = { 442 .add = mc_sysdev_add, 443 .remove = mc_sysdev_remove, 444 + }; 445 + 446 + /** 447 + * mc_bp_resume - Update boot CPU microcode during resume. 448 + */ 449 + static void mc_bp_resume(void) 450 + { 451 + int cpu = smp_processor_id(); 452 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 453 + 454 + if (uci->valid && uci->mc) 455 + microcode_ops->apply_microcode(cpu); 456 + } 457 + 458 + static struct syscore_ops mc_syscore_ops = { 459 + .resume = mc_bp_resume, 460 }; 461 462 static __cpuinit int ··· 542 if (error) 543 return error; 544 545 + register_syscore_ops(&mc_syscore_ops); 546 register_hotcpu_notifier(&mc_cpu_notifier); 547 548 pr_info("Microcode Update Driver: v" MICROCODE_VERSION
+7 -25
arch/x86/kernel/pci-gart_64.c
··· 27 #include <linux/kdebug.h> 28 #include <linux/scatterlist.h> 29 #include <linux/iommu-helper.h> 30 - #include <linux/sysdev.h> 31 #include <linux/io.h> 32 #include <linux/gfp.h> 33 #include <asm/atomic.h> ··· 589 aperture_alloc = aper_alloc; 590 } 591 592 - static void gart_fixup_northbridges(struct sys_device *dev) 593 { 594 int i; 595 ··· 613 } 614 } 615 616 - static int gart_resume(struct sys_device *dev) 617 { 618 pr_info("PCI-DMA: Resuming GART IOMMU\n"); 619 620 - gart_fixup_northbridges(dev); 621 622 enable_gart_translations(); 623 - 624 - return 0; 625 } 626 627 - static int gart_suspend(struct sys_device *dev, pm_message_t state) 628 - { 629 - return 0; 630 - } 631 - 632 - static struct sysdev_class gart_sysdev_class = { 633 - .name = "gart", 634 - .suspend = gart_suspend, 635 .resume = gart_resume, 636 637 - }; 638 - 639 - static struct sys_device device_gart = { 640 - .cls = &gart_sysdev_class, 641 }; 642 643 /* ··· 637 unsigned aper_base, new_aper_base; 638 struct pci_dev *dev; 639 void *gatt; 640 - int i, error; 641 642 pr_info("PCI-DMA: Disabling AGP.\n"); 643 ··· 672 673 agp_gatt_table = gatt; 674 675 - error = sysdev_class_register(&gart_sysdev_class); 676 - if (!error) 677 - error = sysdev_register(&device_gart); 678 - if (error) 679 - panic("Could not register gart_sysdev -- " 680 - "would corrupt data on next suspend"); 681 682 flush_gart(); 683
··· 27 #include <linux/kdebug.h> 28 #include <linux/scatterlist.h> 29 #include <linux/iommu-helper.h> 30 + #include <linux/syscore_ops.h> 31 #include <linux/io.h> 32 #include <linux/gfp.h> 33 #include <asm/atomic.h> ··· 589 aperture_alloc = aper_alloc; 590 } 591 592 + static void gart_fixup_northbridges(void) 593 { 594 int i; 595 ··· 613 } 614 } 615 616 + static void gart_resume(void) 617 { 618 pr_info("PCI-DMA: Resuming GART IOMMU\n"); 619 620 + gart_fixup_northbridges(); 621 622 enable_gart_translations(); 623 } 624 625 + static struct syscore_ops gart_syscore_ops = { 626 .resume = gart_resume, 627 628 }; 629 630 /* ··· 650 unsigned aper_base, new_aper_base; 651 struct pci_dev *dev; 652 void *gatt; 653 + int i; 654 655 pr_info("PCI-DMA: Disabling AGP.\n"); 656 ··· 685 686 agp_gatt_table = gatt; 687 688 + register_syscore_ops(&gart_syscore_ops); 689 690 flush_gart(); 691
+12 -32
arch/x86/oprofile/nmi_int.c
··· 15 #include <linux/notifier.h> 16 #include <linux/smp.h> 17 #include <linux/oprofile.h> 18 - #include <linux/sysdev.h> 19 #include <linux/slab.h> 20 #include <linux/moduleparam.h> 21 #include <linux/kdebug.h> ··· 536 537 #ifdef CONFIG_PM 538 539 - static int nmi_suspend(struct sys_device *dev, pm_message_t state) 540 { 541 /* Only one CPU left, just stop that one */ 542 if (nmi_enabled == 1) ··· 544 return 0; 545 } 546 547 - static int nmi_resume(struct sys_device *dev) 548 { 549 if (nmi_enabled == 1) 550 nmi_cpu_start(NULL); 551 - return 0; 552 } 553 554 - static struct sysdev_class oprofile_sysclass = { 555 - .name = "oprofile", 556 .resume = nmi_resume, 557 .suspend = nmi_suspend, 558 }; 559 560 - static struct sys_device device_oprofile = { 561 - .id = 0, 562 - .cls = &oprofile_sysclass, 563 - }; 564 - 565 - static int __init init_sysfs(void) 566 { 567 - int error; 568 - 569 - error = sysdev_class_register(&oprofile_sysclass); 570 - if (error) 571 - return error; 572 - 573 - error = sysdev_register(&device_oprofile); 574 - if (error) 575 - sysdev_class_unregister(&oprofile_sysclass); 576 - 577 - return error; 578 } 579 580 - static void exit_sysfs(void) 581 { 582 - sysdev_unregister(&device_oprofile); 583 - sysdev_class_unregister(&oprofile_sysclass); 584 } 585 586 #else 587 588 - static inline int init_sysfs(void) { return 0; } 589 - static inline void exit_sysfs(void) { } 590 591 #endif /* CONFIG_PM */ 592 ··· 771 772 mux_init(ops); 773 774 - ret = init_sysfs(); 775 - if (ret) 776 - return ret; 777 778 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 779 return 0; ··· 779 780 void op_nmi_exit(void) 781 { 782 - exit_sysfs(); 783 }
··· 15 #include <linux/notifier.h> 16 #include <linux/smp.h> 17 #include <linux/oprofile.h> 18 + #include <linux/syscore_ops.h> 19 #include <linux/slab.h> 20 #include <linux/moduleparam.h> 21 #include <linux/kdebug.h> ··· 536 537 #ifdef CONFIG_PM 538 539 + static int nmi_suspend(void) 540 { 541 /* Only one CPU left, just stop that one */ 542 if (nmi_enabled == 1) ··· 544 return 0; 545 } 546 547 + static void nmi_resume(void) 548 { 549 if (nmi_enabled == 1) 550 nmi_cpu_start(NULL); 551 } 552 553 + static struct syscore_ops oprofile_syscore_ops = { 554 .resume = nmi_resume, 555 .suspend = nmi_suspend, 556 }; 557 558 + static void __init init_suspend_resume(void) 559 { 560 + register_syscore_ops(&oprofile_syscore_ops); 561 } 562 563 + static void exit_suspend_resume(void) 564 { 565 + unregister_syscore_ops(&oprofile_syscore_ops); 566 } 567 568 #else 569 570 + static inline void init_suspend_resume(void) { } 571 + static inline void exit_suspend_resume(void) { } 572 573 #endif /* CONFIG_PM */ 574 ··· 789 790 mux_init(ops); 791 792 + init_suspend_resume(); 793 794 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 795 return 0; ··· 799 800 void op_nmi_exit(void) 801 { 802 + exit_suspend_resume(); 803 }