Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/pseries: Migration code reorganization / hibernation prep

Partition hibernation will use some of the same code as is
currently used for Live Partition Migration. This patch
further abstracts this code such that code outside of rtas.c
can utilize it. It also changes the error field in the suspend
me data structure to be an atomic type, since it is set and
checked on different cpus without any barriers or locking.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Brian King and committed by
Benjamin Herrenschmidt
8fe93f8d c1aa687d

+81 -35
+1
arch/powerpc/include/asm/hvcall.h
··· 74 74 #define H_NOT_ENOUGH_RESOURCES -44 75 75 #define H_R_STATE -45 76 76 #define H_RESCINDEND -46 77 + #define H_MULTI_THREADS_ACTIVE -9005 77 78 78 79 79 80 /* Long Busy is a condition that can be returned by the firmware
+10
arch/powerpc/include/asm/rtas.h
··· 63 63 struct device_node *dev; /* virtual address pointer */ 64 64 }; 65 65 66 + struct rtas_suspend_me_data { 67 + atomic_t working; /* number of cpus accessing this struct */ 68 + atomic_t done; 69 + int token; /* ibm,suspend-me */ 70 + atomic_t error; 71 + struct completion *complete; /* wait on this until working == 0 */ 72 + }; 73 + 66 74 /* RTAS event classes */ 67 75 #define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */ 68 76 #define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */ ··· 182 174 extern int rtas_set_indicator_fast(int indicator, int index, int new_value); 183 175 extern void rtas_progress(char *s, unsigned short hex); 184 176 extern void rtas_initialize(void); 177 + extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); 178 + extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); 185 179 186 180 struct rtc_time; 187 181 extern unsigned long rtas_get_boot_time(void);
+70 -35
arch/powerpc/kernel/rtas.c
··· 47 47 }; 48 48 EXPORT_SYMBOL(rtas); 49 49 50 - struct rtas_suspend_me_data { 51 - atomic_t working; /* number of cpus accessing this struct */ 52 - atomic_t done; 53 - int token; /* ibm,suspend-me */ 54 - int error; 55 - struct completion *complete; /* wait on this until working == 0 */ 56 - }; 57 - 58 50 DEFINE_SPINLOCK(rtas_data_buf_lock); 59 51 EXPORT_SYMBOL(rtas_data_buf_lock); 60 52 ··· 706 714 707 715 static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; 708 716 #ifdef CONFIG_PPC_PSERIES 709 - static void rtas_percpu_suspend_me(void *info) 717 + static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done) 718 + { 719 + u16 slb_size = mmu_slb_size; 720 + int rc = H_MULTI_THREADS_ACTIVE; 721 + int cpu; 722 + 723 + slb_set_size(SLB_MIN_SIZE); 724 + printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); 725 + 726 + while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && 727 + !atomic_read(&data->error)) 728 + rc = rtas_call(data->token, 0, 1, NULL); 729 + 730 + if (rc || atomic_read(&data->error)) { 731 + printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc); 732 + slb_set_size(slb_size); 733 + } 734 + 735 + if (atomic_read(&data->error)) 736 + rc = atomic_read(&data->error); 737 + 738 + atomic_set(&data->error, rc); 739 + 740 + if (wake_when_done) { 741 + atomic_set(&data->done, 1); 742 + 743 + for_each_online_cpu(cpu) 744 + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); 745 + } 746 + 747 + if (atomic_dec_return(&data->working) == 0) 748 + complete(data->complete); 749 + 750 + return rc; 751 + } 752 + 753 + int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data) 754 + { 755 + atomic_inc(&data->working); 756 + return __rtas_suspend_last_cpu(data, 0); 757 + } 758 + 759 + static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done) 710 760 { 711 761 long rc = H_SUCCESS; 712 762 unsigned long msr_save; 713 - u16 slb_size = mmu_slb_size; 714 763 int cpu; 715 
- struct rtas_suspend_me_data *data = 716 - (struct rtas_suspend_me_data *)info; 717 764 718 765 atomic_inc(&data->working); 719 766 ··· 760 729 msr_save = mfmsr(); 761 730 mtmsr(msr_save & ~(MSR_EE)); 762 731 763 - while (rc == H_SUCCESS && !atomic_read(&data->done)) 732 + while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error)) 764 733 rc = plpar_hcall_norets(H_JOIN); 765 734 766 735 mtmsr(msr_save); ··· 772 741 /* All other cpus are in H_JOIN, this cpu does 773 742 * the suspend. 774 743 */ 775 - slb_set_size(SLB_MIN_SIZE); 776 - printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", 777 - smp_processor_id()); 778 - data->error = rtas_call(data->token, 0, 1, NULL); 779 - 780 - if (data->error) { 781 - printk(KERN_DEBUG "ibm,suspend-me returned %d\n", 782 - data->error); 783 - slb_set_size(slb_size); 784 - } 744 + return __rtas_suspend_last_cpu(data, wake_when_done); 785 745 } else { 786 746 printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n", 787 747 smp_processor_id(), rc); 788 - data->error = rc; 748 + atomic_set(&data->error, rc); 789 749 } 790 750 791 - atomic_set(&data->done, 1); 751 + if (wake_when_done) { 752 + atomic_set(&data->done, 1); 792 753 793 - /* This cpu did the suspend or got an error; in either case, 794 - * we need to prod all other other cpus out of join state. 795 - * Extra prods are harmless. 796 - */ 797 - for_each_online_cpu(cpu) 798 - plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); 754 + /* This cpu did the suspend or got an error; in either case, 755 + * we need to prod all other other cpus out of join state. 756 + * Extra prods are harmless. 
757 + */ 758 + for_each_online_cpu(cpu) 759 + plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); 760 + } 799 761 out: 800 762 if (atomic_dec_return(&data->working) == 0) 801 763 complete(data->complete); 764 + return rc; 765 + } 766 + 767 + int rtas_suspend_cpu(struct rtas_suspend_me_data *data) 768 + { 769 + return __rtas_suspend_cpu(data, 0); 770 + } 771 + 772 + static void rtas_percpu_suspend_me(void *info) 773 + { 774 + __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); 802 775 } 803 776 804 777 static int rtas_ibm_suspend_me(struct rtas_args *args) ··· 837 802 838 803 atomic_set(&data.working, 0); 839 804 atomic_set(&data.done, 0); 805 + atomic_set(&data.error, 0); 840 806 data.token = rtas_token("ibm,suspend-me"); 841 - data.error = 0; 842 807 data.complete = &done; 843 808 844 809 /* Call function on all CPUs. One of us will make the 845 810 * rtas call 846 811 */ 847 812 if (on_each_cpu(rtas_percpu_suspend_me, &data, 0)) 848 - data.error = -EINVAL; 813 + atomic_set(&data.error, -EINVAL); 849 814 850 815 wait_for_completion(&done); 851 816 852 - if (data.error != 0) 817 + if (atomic_read(&data.error) != 0) 853 818 printk(KERN_ERR "Error doing global join\n"); 854 819 855 - return data.error; 820 + return atomic_read(&data.error); 856 821 } 857 822 #else /* CONFIG_PPC_PSERIES */ 858 823 static int rtas_ibm_suspend_me(struct rtas_args *args)