[POWERPC] spufs: cell spu problem state mapping updates

This patch adds a new "psmap" file to spufs that allows mmap of all of
the problem state mapping of SPEs. It is compatible with 64k pages. In
addition, it removes mmap ability of individual files when using 64k
pages, with the exception of signal1 and signal2 which will both map the
entire 64k page holding both registers. It also removes
CONFIG_SPUFS_MMAP as there is no point in not building mmap support in
spufs.

It goes along with a separate patch to libspe that implements usage of
that new file to access problem state registers.

Another patch will follow up to fix races opened up by accessing
the 'runcntl' register directly, which is made possible with this
patch.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by Benjamin Herrenschmidt and committed by Paul Mackerras 27d5bf2a 3bdc9d0b

+84 -51
-5
arch/powerpc/platforms/cell/Kconfig
··· 16 16 bool 17 17 default n 18 18 19 - config SPUFS_MMAP 20 - bool 21 - depends on SPU_FS && SPARSEMEM 22 - default y 23 - 24 19 config CBE_RAS 25 20 bool "RAS features for bare metal Cell BE" 26 21 default y
+84 -46
arch/powerpc/platforms/cell/spufs/file.c
··· 36 36 37 37 #include "spufs.h" 38 38 39 + #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) 40 + 39 41 40 42 static int 41 43 spufs_mem_open(struct inode *inode, struct file *file) ··· 90 88 return ret; 91 89 } 92 90 93 - #ifdef CONFIG_SPUFS_MMAP 94 91 static struct page * 95 92 spufs_mem_mmap_nopage(struct vm_area_struct *vma, 96 93 unsigned long address, int *type) ··· 134 133 vma->vm_ops = &spufs_mem_mmap_vmops; 135 134 return 0; 136 135 } 137 - #endif 138 136 139 137 static struct file_operations spufs_mem_fops = { 140 138 .open = spufs_mem_open, 141 139 .read = spufs_mem_read, 142 140 .write = spufs_mem_write, 143 141 .llseek = generic_file_llseek, 144 - #ifdef CONFIG_SPUFS_MMAP 145 142 .mmap = spufs_mem_mmap, 146 - #endif 147 143 }; 148 144 149 - #ifdef CONFIG_SPUFS_MMAP 150 145 static struct page *spufs_ps_nopage(struct vm_area_struct *vma, 151 146 unsigned long address, 152 - int *type, unsigned long ps_offs) 147 + int *type, unsigned long ps_offs, 148 + unsigned long ps_size) 153 149 { 154 150 struct page *page = NOPAGE_SIGBUS; 155 151 int fault_type = VM_FAULT_SIGBUS; ··· 156 158 int ret; 157 159 158 160 offset += vma->vm_pgoff << PAGE_SHIFT; 159 - if (offset >= 0x4000) 161 + if (offset >= ps_size) 160 162 goto out; 161 163 162 164 ret = spu_acquire_runnable(ctx); ··· 177 179 return page; 178 180 } 179 181 182 + #if SPUFS_MMAP_4K 180 183 static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma, 181 184 unsigned long address, int *type) 182 185 { 183 - return spufs_ps_nopage(vma, address, type, 0x4000); 186 + return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000); 184 187 } 185 188 186 189 static struct vm_operations_struct spufs_cntl_mmap_vmops = { ··· 190 191 191 192 /* 192 193 * mmap support for problem state control area [0x4000 - 0x4fff]. 193 - * Mapping this area requires that the application have CAP_SYS_RAWIO, 194 - * as these registers require special care when read/writing. 
195 194 */ 196 195 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) 197 196 { 198 197 if (!(vma->vm_flags & VM_SHARED)) 199 198 return -EINVAL; 200 - 201 - if (!capable(CAP_SYS_RAWIO)) 202 - return -EPERM; 203 199 204 200 vma->vm_flags |= VM_RESERVED; 205 201 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) ··· 203 209 vma->vm_ops = &spufs_cntl_mmap_vmops; 204 210 return 0; 205 211 } 206 - #endif 212 + #else /* SPUFS_MMAP_4K */ 213 + #define spufs_cntl_mmap NULL 214 + #endif /* !SPUFS_MMAP_4K */ 207 215 208 216 static int spufs_cntl_open(struct inode *inode, struct file *file) 209 217 { ··· 238 242 .open = spufs_cntl_open, 239 243 .read = spufs_cntl_read, 240 244 .write = spufs_cntl_write, 241 - #ifdef CONFIG_SPUFS_MMAP 242 245 .mmap = spufs_cntl_mmap, 243 - #endif 244 246 }; 245 247 246 248 static int ··· 651 657 return 4; 652 658 } 653 659 654 - #ifdef CONFIG_SPUFS_MMAP 655 660 static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma, 656 661 unsigned long address, int *type) 657 662 { 658 - return spufs_ps_nopage(vma, address, type, 0x14000); 663 + #if PAGE_SIZE == 0x1000 664 + return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000); 665 + #elif PAGE_SIZE == 0x10000 666 + /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 667 + * signal 1 and 2 area 668 + */ 669 + return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000); 670 + #else 671 + #error unsupported page size 672 + #endif 659 673 } 660 674 661 675 static struct vm_operations_struct spufs_signal1_mmap_vmops = { ··· 682 680 vma->vm_ops = &spufs_signal1_mmap_vmops; 683 681 return 0; 684 682 } 685 - #endif 686 683 687 684 static struct file_operations spufs_signal1_fops = { 688 685 .open = spufs_signal1_open, 689 686 .read = spufs_signal1_read, 690 687 .write = spufs_signal1_write, 691 - #ifdef CONFIG_SPUFS_MMAP 692 688 .mmap = spufs_signal1_mmap, 693 - #endif 694 689 }; 695 690 696 691 static int spufs_signal2_open(struct inode 
*inode, struct file *file) ··· 742 743 return 4; 743 744 } 744 745 745 - #ifdef CONFIG_SPUFS_MMAP 746 + #if SPUFS_MMAP_4K 746 747 static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma, 747 748 unsigned long address, int *type) 748 749 { 749 - return spufs_ps_nopage(vma, address, type, 0x1c000); 750 + #if PAGE_SIZE == 0x1000 751 + return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000); 752 + #elif PAGE_SIZE == 0x10000 753 + /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 754 + * signal 1 and 2 area 755 + */ 756 + return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000); 757 + #else 758 + #error unsupported page size 759 + #endif 750 760 } 751 761 752 762 static struct vm_operations_struct spufs_signal2_mmap_vmops = { ··· 775 767 vma->vm_ops = &spufs_signal2_mmap_vmops; 776 768 return 0; 777 769 } 778 - #endif 770 + #else /* SPUFS_MMAP_4K */ 771 + #define spufs_signal2_mmap NULL 772 + #endif /* !SPUFS_MMAP_4K */ 779 773 780 774 static struct file_operations spufs_signal2_fops = { 781 775 .open = spufs_signal2_open, 782 776 .read = spufs_signal2_read, 783 777 .write = spufs_signal2_write, 784 - #ifdef CONFIG_SPUFS_MMAP 785 778 .mmap = spufs_signal2_mmap, 786 - #endif 787 779 }; 788 780 789 781 static void spufs_signal1_type_set(void *data, u64 val) ··· 832 824 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, 833 825 spufs_signal2_type_set, "%llu"); 834 826 835 - #ifdef CONFIG_SPUFS_MMAP 827 + #if SPUFS_MMAP_4K 836 828 static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma, 837 829 unsigned long address, int *type) 838 830 { 839 - return spufs_ps_nopage(vma, address, type, 0x0000); 831 + return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000); 840 832 } 841 833 842 834 static struct vm_operations_struct spufs_mss_mmap_vmops = { ··· 845 837 846 838 /* 847 839 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 
848 - * Mapping this area requires that the application have CAP_SYS_RAWIO, 849 - * as these registers require special care when read/writing. 850 840 */ 851 841 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) 852 842 { 853 843 if (!(vma->vm_flags & VM_SHARED)) 854 844 return -EINVAL; 855 - 856 - if (!capable(CAP_SYS_RAWIO)) 857 - return -EPERM; 858 845 859 846 vma->vm_flags |= VM_RESERVED; 860 847 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) ··· 858 855 vma->vm_ops = &spufs_mss_mmap_vmops; 859 856 return 0; 860 857 } 861 - #endif 858 + #else /* SPUFS_MMAP_4K */ 859 + #define spufs_mss_mmap NULL 860 + #endif /* !SPUFS_MMAP_4K */ 862 861 863 862 static int spufs_mss_open(struct inode *inode, struct file *file) 864 863 { ··· 872 867 873 868 static struct file_operations spufs_mss_fops = { 874 869 .open = spufs_mss_open, 875 - #ifdef CONFIG_SPUFS_MMAP 876 870 .mmap = spufs_mss_mmap, 877 - #endif 871 + }; 872 + 873 + static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma, 874 + unsigned long address, int *type) 875 + { 876 + return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000); 877 + } 878 + 879 + static struct vm_operations_struct spufs_psmap_mmap_vmops = { 880 + .nopage = spufs_psmap_mmap_nopage, 881 + }; 882 + 883 + /* 884 + * mmap support for full problem state area [0x00000 - 0x1ffff]. 
885 + */ 886 + static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) 887 + { 888 + if (!(vma->vm_flags & VM_SHARED)) 889 + return -EINVAL; 890 + 891 + vma->vm_flags |= VM_RESERVED; 892 + vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 893 + | _PAGE_NO_CACHE | _PAGE_GUARDED); 894 + 895 + vma->vm_ops = &spufs_psmap_mmap_vmops; 896 + return 0; 897 + } 898 + 899 + static int spufs_psmap_open(struct inode *inode, struct file *file) 900 + { 901 + struct spufs_inode_info *i = SPUFS_I(inode); 902 + 903 + file->private_data = i->i_ctx; 904 + return nonseekable_open(inode, file); 905 + } 906 + 907 + static struct file_operations spufs_psmap_fops = { 908 + .open = spufs_psmap_open, 909 + .mmap = spufs_psmap_mmap, 878 910 }; 879 911 880 912 881 - #ifdef CONFIG_SPUFS_MMAP 913 + #if SPUFS_MMAP_4K 882 914 static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma, 883 915 unsigned long address, int *type) 884 916 { 885 - return spufs_ps_nopage(vma, address, type, 0x3000); 917 + return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000); 886 918 } 887 919 888 920 static struct vm_operations_struct spufs_mfc_mmap_vmops = { ··· 928 886 929 887 /* 930 888 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. 931 - * Mapping this area requires that the application have CAP_SYS_RAWIO, 932 - * as these registers require special care when read/writing. 
933 889 */ 934 890 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) 935 891 { 936 892 if (!(vma->vm_flags & VM_SHARED)) 937 893 return -EINVAL; 938 - 939 - if (!capable(CAP_SYS_RAWIO)) 940 - return -EPERM; 941 894 942 895 vma->vm_flags |= VM_RESERVED; 943 896 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) ··· 941 904 vma->vm_ops = &spufs_mfc_mmap_vmops; 942 905 return 0; 943 906 } 944 - #endif 907 + #else /* SPUFS_MMAP_4K */ 908 + #define spufs_mfc_mmap NULL 909 + #endif /* !SPUFS_MMAP_4K */ 945 910 946 911 static int spufs_mfc_open(struct inode *inode, struct file *file) 947 912 { ··· 1233 1194 .flush = spufs_mfc_flush, 1234 1195 .fsync = spufs_mfc_fsync, 1235 1196 .fasync = spufs_mfc_fasync, 1236 - #ifdef CONFIG_SPUFS_MMAP 1237 1197 .mmap = spufs_mfc_mmap, 1238 - #endif 1239 1198 }; 1240 1199 1241 1200 static void spufs_npc_set(void *data, u64 val) ··· 1405 1368 { "event_mask", &spufs_event_mask_ops, 0666, }, 1406 1369 { "srr0", &spufs_srr0_ops, 0666, }, 1407 1370 { "phys-id", &spufs_id_ops, 0666, }, 1371 + { "psmap", &spufs_psmap_fops, 0666, }, 1408 1372 {}, 1409 1373 };