Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'parisc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller.

The bulk of this is optimized page copying/clearing and cache flushing
(virtual caches are lovely) by John David Anglin.

* 'parisc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (31 commits)
arch/parisc/include/asm: use ARRAY_SIZE macro in mmzone.h
parisc: remove empty lines and unnecessary #ifdef coding in include/asm/signal.h
parisc: sendfile and sendfile64 syscall cleanups
parisc: switch to available compat_sched_rr_get_interval implementation
parisc: fix fallocate syscall
parisc: fix error return codes for rt_sigaction and rt_sigprocmask
parisc: convert msgrcv and msgsnd syscalls to use compat layer
parisc: correctly wire up mq_* functions for CONFIG_COMPAT case
parisc: fix personality on 32bit kernel
parisc: wire up process_vm_readv, process_vm_writev, kcmp and finit_module syscalls
parisc: led driver requires CONFIG_VM_EVENT_COUNTERS
parisc: remove unused compat_rt_sigframe.h header
parisc/mm/fault.c: Port OOM changes to do_page_fault
parisc: space register variables need to be in native length (unsigned long)
parisc: fix ptrace breakage
parisc: always detect multiple physical ranges
parisc: ensure that mmapped shared pages are aligned at SHMLBA addresses
parisc: disable preemption while flushing D- or I-caches through TMPALIAS region
parisc: remove IRQF_DISABLED
parisc: fixes and cleanups in page cache flushing (4/4)
...

+709 -317
+21
arch/parisc/Kconfig
··· 160 160 def_bool y 161 161 depends on PA8X00 || PA7200 162 162 163 + config MLONGCALLS 164 + bool "Enable the -mlong-calls compiler option for big kernels" 165 + def_bool y if (!MODULES) 166 + depends on PA8X00 167 + help 168 + If you configure the kernel to include many drivers built-in instead 169 + as modules, the kernel executable may become too big, so that the 170 + linker will not be able to resolve some long branches and fails to link 171 + your vmlinux kernel. In that case enabling this option will help you 172 + to overcome this limit by using the -mlong-calls compiler option. 173 + 174 + Usually you want to say N here, unless you e.g. want to build 175 + a kernel which includes all necessary drivers built-in and which can 176 + be used for TFTP booting without the need to have an initrd ramdisk. 177 + 178 + Enabling this option will probably slow down your kernel. 179 + 163 180 config 64BIT 164 181 bool "64-bit kernel" 165 182 depends on PA8X00 ··· 270 253 config COMPAT 271 254 def_bool y 272 255 depends on 64BIT 256 + 257 + config SYSVIPC_COMPAT 258 + def_bool y 259 + depends on COMPAT && SYSVIPC 273 260 274 261 config HPUX 275 262 bool "Support for HP-UX binaries"
+8 -5
arch/parisc/Makefile
··· 32 32 UTS_MACHINE := parisc64 33 33 CHECKFLAGS += -D__LP64__=1 -m64 34 34 WIDTH := 64 35 - 36 - # FIXME: if no default set, should really try to locate dynamically 37 - ifeq ($(CROSS_COMPILE),) 38 - CROSS_COMPILE := hppa64-linux-gnu- 39 - endif 40 35 else # 32-bit 41 36 WIDTH := 42 37 endif ··· 39 44 # attempt to help out folks who are cross-compiling 40 45 ifeq ($(NATIVE),1) 41 46 CROSS_COMPILE := hppa$(WIDTH)-linux- 47 + else 48 + ifeq ($(CROSS_COMPILE),) 49 + CROSS_COMPILE := hppa$(WIDTH)-linux-gnu- 50 + endif 42 51 endif 43 52 44 53 OBJCOPY_FLAGS =-O binary -R .note -R .comment -S ··· 63 64 ifndef CONFIG_FUNCTION_TRACER 64 65 cflags-y += -ffunction-sections 65 66 endif 67 + 68 + # Use long jumps instead of long branches (needed if your linker fails to 69 + # link a too big vmlinux executable) 70 + cflags-$(CONFIG_MLONGCALLS) += -mlong-calls 66 71 67 72 # select which processor to optimise for 68 73 cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
+1 -2
arch/parisc/hpux/fs.c
··· 43 43 44 44 error = do_execve(filename->name, 45 45 (const char __user *const __user *) regs->gr[25], 46 - (const char __user *const __user *) regs->gr[24], 47 - regs); 46 + (const char __user *const __user *) regs->gr[24]); 48 47 49 48 putname(filename); 50 49
+2
arch/parisc/include/asm/cacheflush.h
··· 115 115 { 116 116 if (PageAnon(page)) { 117 117 flush_tlb_page(vma, vmaddr); 118 + preempt_disable(); 118 119 flush_dcache_page_asm(page_to_phys(page), vmaddr); 120 + preempt_enable(); 119 121 } 120 122 } 121 123
+61
arch/parisc/include/asm/compat.h
··· 28 28 typedef u16 compat_ipc_pid_t; 29 29 typedef s32 compat_daddr_t; 30 30 typedef u32 compat_caddr_t; 31 + typedef s32 compat_key_t; 31 32 typedef s32 compat_timer_t; 32 33 33 34 typedef s32 compat_int_t; ··· 188 187 189 188 #define COMPAT_OFF_T_MAX 0x7fffffff 190 189 #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL 190 + 191 + struct compat_ipc64_perm { 192 + compat_key_t key; 193 + __compat_uid_t uid; 194 + __compat_gid_t gid; 195 + __compat_uid_t cuid; 196 + __compat_gid_t cgid; 197 + unsigned short int __pad1; 198 + compat_mode_t mode; 199 + unsigned short int __pad2; 200 + unsigned short int seq; 201 + unsigned int __pad3; 202 + unsigned long __unused1; /* yes they really are 64bit pads */ 203 + unsigned long __unused2; 204 + }; 205 + 206 + struct compat_semid64_ds { 207 + struct compat_ipc64_perm sem_perm; 208 + compat_time_t sem_otime; 209 + unsigned int __unused1; 210 + compat_time_t sem_ctime; 211 + unsigned int __unused2; 212 + compat_ulong_t sem_nsems; 213 + compat_ulong_t __unused3; 214 + compat_ulong_t __unused4; 215 + }; 216 + 217 + struct compat_msqid64_ds { 218 + struct compat_ipc64_perm msg_perm; 219 + unsigned int __unused1; 220 + compat_time_t msg_stime; 221 + unsigned int __unused2; 222 + compat_time_t msg_rtime; 223 + unsigned int __unused3; 224 + compat_time_t msg_ctime; 225 + compat_ulong_t msg_cbytes; 226 + compat_ulong_t msg_qnum; 227 + compat_ulong_t msg_qbytes; 228 + compat_pid_t msg_lspid; 229 + compat_pid_t msg_lrpid; 230 + compat_ulong_t __unused4; 231 + compat_ulong_t __unused5; 232 + }; 233 + 234 + struct compat_shmid64_ds { 235 + struct compat_ipc64_perm shm_perm; 236 + unsigned int __unused1; 237 + compat_time_t shm_atime; 238 + unsigned int __unused2; 239 + compat_time_t shm_dtime; 240 + unsigned int __unused3; 241 + compat_time_t shm_ctime; 242 + unsigned int __unused4; 243 + compat_size_t shm_segsz; 244 + compat_pid_t shm_cpid; 245 + compat_pid_t shm_lpid; 246 + compat_ulong_t shm_nattch; 247 + compat_ulong_t __unused5; 248 + 
compat_ulong_t __unused6; 249 + }; 191 250 192 251 /* 193 252 * A pointer passed in from user mode. This should not
-50
arch/parisc/include/asm/compat_rt_sigframe.h
··· 1 - #include <linux/compat.h> 2 - #include <linux/compat_siginfo.h> 3 - #include <asm/compat_ucontext.h> 4 - 5 - #ifndef _ASM_PARISC_COMPAT_RT_SIGFRAME_H 6 - #define _ASM_PARISC_COMPAT_RT_SIGFRAME_H 7 - 8 - /* In a deft move of uber-hackery, we decide to carry the top half of all 9 - * 64-bit registers in a non-portable, non-ABI, hidden structure. 10 - * Userspace can read the hidden structure if it *wants* but is never 11 - * guaranteed to be in the same place. Infact the uc_sigmask from the 12 - * ucontext_t structure may push the hidden register file downards 13 - */ 14 - struct compat_regfile { 15 - /* Upper half of all the 64-bit registers that were truncated 16 - on a copy to a 32-bit userspace */ 17 - compat_int_t rf_gr[32]; 18 - compat_int_t rf_iasq[2]; 19 - compat_int_t rf_iaoq[2]; 20 - compat_int_t rf_sar; 21 - }; 22 - 23 - #define COMPAT_SIGRETURN_TRAMP 4 24 - #define COMPAT_SIGRESTARTBLOCK_TRAMP 5 25 - #define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + COMPAT_SIGRESTARTBLOCK_TRAMP) 26 - 27 - struct compat_rt_sigframe { 28 - /* XXX: Must match trampoline size in arch/parisc/kernel/signal.c 29 - Secondary to that it must protect the ERESTART_RESTARTBLOCK 30 - trampoline we left on the stack (we were bad and didn't 31 - change sp so we could run really fast.) */ 32 - compat_uint_t tramp[COMPAT_TRAMP_SIZE]; 33 - compat_siginfo_t info; 34 - struct compat_ucontext uc; 35 - /* Hidden location of truncated registers, *must* be last. */ 36 - struct compat_regfile regs; 37 - }; 38 - 39 - /* 40 - * The 32-bit ABI wants at least 48 bytes for a function call frame: 41 - * 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of 42 - * which Linux/parisc uses is sp-20 for the saved return pointer...) 43 - * Then, the stack pointer must be rounded to a cache line (64 bytes). 
44 - */ 45 - #define SIGFRAME32 64 46 - #define FUNCTIONCALLFRAME32 48 47 - #define PARISC_RT_SIGFRAME_SIZE32 \ 48 - (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32) 49 - 50 - #endif
+1 -1
arch/parisc/include/asm/elf.h
··· 247 247 #define ELF_PLATFORM ("PARISC\0") 248 248 249 249 #define SET_PERSONALITY(ex) \ 250 - current->personality = PER_LINUX; \ 250 + set_personality((current->personality & ~PER_MASK) | PER_LINUX); \ 251 251 current->thread.map_base = DEFAULT_MAP_BASE; \ 252 252 current->thread.task_size = DEFAULT_TASK_SIZE \ 253 253
+2 -2
arch/parisc/include/asm/floppy.h
··· 157 157 { 158 158 if(can_use_virtual_dma) 159 159 return request_irq(FLOPPY_IRQ, floppy_hardint, 160 - IRQF_DISABLED, "floppy", NULL); 160 + 0, "floppy", NULL); 161 161 else 162 162 return request_irq(FLOPPY_IRQ, floppy_interrupt, 163 - IRQF_DISABLED, "floppy", NULL); 163 + 0, "floppy", NULL); 164 164 } 165 165 166 166 static unsigned long dma_mem_alloc(unsigned long size)
+3 -4
arch/parisc/include/asm/mmzone.h
··· 1 1 #ifndef _PARISC_MMZONE_H 2 2 #define _PARISC_MMZONE_H 3 3 4 + #define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */ 5 + 4 6 #ifdef CONFIG_DISCONTIGMEM 5 7 6 - #define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */ 7 8 extern int npmem_ranges; 8 9 9 10 struct node_map_data { ··· 45 44 return 0; 46 45 47 46 i = pfn >> PFNNID_SHIFT; 48 - BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0])); 47 + BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); 49 48 r = pfnnid_map[i]; 50 49 BUG_ON(r == 0xff); 51 50 ··· 61 60 return 0; 62 61 } 63 62 64 - #else /* !CONFIG_DISCONTIGMEM */ 65 - #define MAX_PHYSMEM_RANGES 1 66 63 #endif 67 64 #endif /* _PARISC_MMZONE_H */
+16 -4
arch/parisc/include/asm/page.h
··· 21 21 #include <asm/types.h> 22 22 #include <asm/cache.h> 23 23 24 - #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 25 - #define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from)) 24 + #define clear_page(page) clear_page_asm((void *)(page)) 25 + #define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from)) 26 26 27 27 struct page; 28 28 29 - void copy_user_page_asm(void *to, void *from); 29 + void clear_page_asm(void *page); 30 + void copy_page_asm(void *to, void *from); 31 + void clear_user_page(void *vto, unsigned long vaddr, struct page *pg); 30 32 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 31 33 struct page *pg); 32 - void clear_user_page(void *page, unsigned long vaddr, struct page *pg); 34 + 35 + /* #define CONFIG_PARISC_TMPALIAS */ 36 + 37 + #ifdef CONFIG_PARISC_TMPALIAS 38 + void clear_user_highpage(struct page *page, unsigned long vaddr); 39 + #define clear_user_highpage clear_user_highpage 40 + struct vm_area_struct; 41 + void copy_user_highpage(struct page *to, struct page *from, 42 + unsigned long vaddr, struct vm_area_struct *vma); 43 + #define __HAVE_ARCH_COPY_USER_HIGHPAGE 44 + #endif 33 45 34 46 /* 35 47 * These are used to make use of C type-checking..
+10 -3
arch/parisc/include/asm/pgtable.h
··· 12 12 13 13 #include <linux/bitops.h> 14 14 #include <linux/spinlock.h> 15 + #include <linux/mm_types.h> 15 16 #include <asm/processor.h> 16 17 #include <asm/cache.h> 17 - 18 - struct vm_area_struct; 19 18 20 19 /* 21 20 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel ··· 39 40 do{ \ 40 41 *(pteptr) = (pteval); \ 41 42 } while(0) 42 - #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 43 + 44 + extern void purge_tlb_entries(struct mm_struct *, unsigned long); 45 + 46 + #define set_pte_at(mm, addr, ptep, pteval) \ 47 + do { \ 48 + set_pte(ptep, pteval); \ 49 + purge_tlb_entries(mm, addr); \ 50 + } while (0) 43 51 44 52 #endif /* !__ASSEMBLY__ */ 45 53 ··· 472 466 old = pte_val(*ptep); 473 467 new = pte_val(pte_wrprotect(__pte (old))); 474 468 } while (cmpxchg((unsigned long *) ptep, old, new) != old); 469 + purge_tlb_entries(mm, addr); 475 470 #else 476 471 pte_t old_pte = *ptep; 477 472 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-4
arch/parisc/include/asm/signal.h
··· 3 3 4 4 #include <uapi/asm/signal.h> 5 5 6 - 7 6 #define _NSIG 64 8 7 /* bits-per-word, where word apparently means 'long' not 'int' */ 9 8 #define _NSIG_BPW BITS_PER_LONG 10 9 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) 11 10 12 11 # ifndef __ASSEMBLY__ 13 - #ifdef CONFIG_64BIT 14 - #else 15 - #endif 16 12 17 13 /* Most things should be clean enough to redefine this at will, if care 18 14 is taken to make libc match. */
+2
arch/parisc/include/asm/unistd.h
··· 149 149 #define __ARCH_WANT_SYS_SIGNAL 150 150 #define __ARCH_WANT_SYS_TIME 151 151 #define __ARCH_WANT_COMPAT_SYS_TIME 152 + #define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL 152 153 #define __ARCH_WANT_SYS_UTIME 153 154 #define __ARCH_WANT_SYS_WAITPID 154 155 #define __ARCH_WANT_SYS_SOCKETCALL ··· 167 166 #define __ARCH_WANT_SYS_FORK 168 167 #define __ARCH_WANT_SYS_VFORK 169 168 #define __ARCH_WANT_SYS_CLONE 169 + #define __ARCH_WANT_COMPAT_SYS_SENDFILE 170 170 171 171 #endif /* __ASSEMBLY__ */ 172 172
+5 -1
arch/parisc/include/uapi/asm/unistd.h
··· 822 822 #define __NR_syncfs (__NR_Linux + 327) 823 823 #define __NR_setns (__NR_Linux + 328) 824 824 #define __NR_sendmmsg (__NR_Linux + 329) 825 + #define __NR_process_vm_readv (__NR_Linux + 330) 826 + #define __NR_process_vm_writev (__NR_Linux + 331) 827 + #define __NR_kcmp (__NR_Linux + 332) 828 + #define __NR_finit_module (__NR_Linux + 333) 825 829 826 - #define __NR_Linux_syscalls (__NR_sendmmsg + 1) 830 + #define __NR_Linux_syscalls (__NR_finit_module + 1) 827 831 828 832 829 833 #define __IGNORE_select /* newselect */
+187 -36
arch/parisc/kernel/cache.c
··· 267 267 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, 268 268 unsigned long physaddr) 269 269 { 270 + preempt_disable(); 270 271 flush_dcache_page_asm(physaddr, vmaddr); 271 272 if (vma->vm_flags & VM_EXEC) 272 273 flush_icache_page_asm(physaddr, vmaddr); 274 + preempt_enable(); 273 275 } 274 276 275 277 void flush_dcache_page(struct page *page) ··· 331 329 EXPORT_SYMBOL(flush_data_cache_local); 332 330 EXPORT_SYMBOL(flush_kernel_icache_range_asm); 333 331 334 - void clear_user_page_asm(void *page, unsigned long vaddr) 335 - { 336 - unsigned long flags; 337 - /* This function is implemented in assembly in pacache.S */ 338 - extern void __clear_user_page_asm(void *page, unsigned long vaddr); 339 - 340 - purge_tlb_start(flags); 341 - __clear_user_page_asm(page, vaddr); 342 - purge_tlb_end(flags); 343 - } 344 - 345 332 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 346 333 int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; 347 334 ··· 364 373 printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus()); 365 374 } 366 375 367 - extern void purge_kernel_dcache_page(unsigned long); 368 - extern void clear_user_page_asm(void *page, unsigned long vaddr); 369 - 370 - void clear_user_page(void *page, unsigned long vaddr, struct page *pg) 371 - { 372 - unsigned long flags; 373 - 374 - purge_kernel_dcache_page((unsigned long)page); 375 - purge_tlb_start(flags); 376 - pdtlb_kernel(page); 377 - purge_tlb_end(flags); 378 - clear_user_page_asm(page, vaddr); 379 - } 380 - EXPORT_SYMBOL(clear_user_page); 376 + extern void purge_kernel_dcache_page_asm(unsigned long); 377 + extern void clear_user_page_asm(void *, unsigned long); 378 + extern void copy_user_page_asm(void *, void *, unsigned long); 381 379 382 380 void flush_kernel_dcache_page_addr(void *addr) 383 381 { ··· 379 399 } 380 400 EXPORT_SYMBOL(flush_kernel_dcache_page_addr); 381 401 382 - void copy_user_page(void *vto, void 
*vfrom, unsigned long vaddr, 383 - struct page *pg) 402 + void clear_user_page(void *vto, unsigned long vaddr, struct page *page) 384 403 { 385 - /* no coherency needed (all in kmap/kunmap) */ 386 - copy_user_page_asm(vto, vfrom); 404 + clear_page_asm(vto); 405 + if (!parisc_requires_coherency()) 406 + flush_kernel_dcache_page_asm(vto); 407 + } 408 + EXPORT_SYMBOL(clear_user_page); 409 + 410 + void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 411 + struct page *pg) 412 + { 413 + /* Copy using kernel mapping. No coherency is needed 414 + (all in kmap/kunmap) on machines that don't support 415 + non-equivalent aliasing. However, the `from' page 416 + needs to be flushed before it can be accessed through 417 + the kernel mapping. */ 418 + preempt_disable(); 419 + flush_dcache_page_asm(__pa(vfrom), vaddr); 420 + preempt_enable(); 421 + copy_page_asm(vto, vfrom); 387 422 if (!parisc_requires_coherency()) 388 423 flush_kernel_dcache_page_asm(vto); 389 424 } ··· 413 418 } 414 419 EXPORT_SYMBOL(kunmap_parisc); 415 420 #endif 421 + 422 + void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) 423 + { 424 + unsigned long flags; 425 + 426 + /* Note: purge_tlb_entries can be called at startup with 427 + no context. */ 428 + 429 + /* Disable preemption while we play with %sr1. 
*/ 430 + preempt_disable(); 431 + mtsp(mm->context, 1); 432 + purge_tlb_start(flags); 433 + pdtlb(addr); 434 + pitlb(addr); 435 + purge_tlb_end(flags); 436 + preempt_enable(); 437 + } 438 + EXPORT_SYMBOL(purge_tlb_entries); 416 439 417 440 void __flush_tlb_range(unsigned long sid, unsigned long start, 418 441 unsigned long end) ··· 471 458 on_each_cpu(cacheflush_h_tmp_function, NULL, 1); 472 459 } 473 460 461 + static inline unsigned long mm_total_size(struct mm_struct *mm) 462 + { 463 + struct vm_area_struct *vma; 464 + unsigned long usize = 0; 465 + 466 + for (vma = mm->mmap; vma; vma = vma->vm_next) 467 + usize += vma->vm_end - vma->vm_start; 468 + return usize; 469 + } 470 + 471 + static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr) 472 + { 473 + pte_t *ptep = NULL; 474 + 475 + if (!pgd_none(*pgd)) { 476 + pud_t *pud = pud_offset(pgd, addr); 477 + if (!pud_none(*pud)) { 478 + pmd_t *pmd = pmd_offset(pud, addr); 479 + if (!pmd_none(*pmd)) 480 + ptep = pte_offset_map(pmd, addr); 481 + } 482 + } 483 + return ptep; 484 + } 485 + 474 486 void flush_cache_mm(struct mm_struct *mm) 475 487 { 488 + /* Flushing the whole cache on each cpu takes forever on 489 + rp3440, etc. So, avoid it if the mm isn't too big. 
*/ 490 + if (mm_total_size(mm) < parisc_cache_flush_threshold) { 491 + struct vm_area_struct *vma; 492 + 493 + if (mm->context == mfsp(3)) { 494 + for (vma = mm->mmap; vma; vma = vma->vm_next) { 495 + flush_user_dcache_range_asm(vma->vm_start, 496 + vma->vm_end); 497 + if (vma->vm_flags & VM_EXEC) 498 + flush_user_icache_range_asm( 499 + vma->vm_start, vma->vm_end); 500 + } 501 + } else { 502 + pgd_t *pgd = mm->pgd; 503 + 504 + for (vma = mm->mmap; vma; vma = vma->vm_next) { 505 + unsigned long addr; 506 + 507 + for (addr = vma->vm_start; addr < vma->vm_end; 508 + addr += PAGE_SIZE) { 509 + pte_t *ptep = get_ptep(pgd, addr); 510 + if (ptep != NULL) { 511 + pte_t pte = *ptep; 512 + __flush_cache_page(vma, addr, 513 + page_to_phys(pte_page(pte))); 514 + } 515 + } 516 + } 517 + } 518 + return; 519 + } 520 + 476 521 #ifdef CONFIG_SMP 477 522 flush_cache_all(); 478 523 #else ··· 556 485 flush_instruction_cache(); 557 486 } 558 487 559 - 560 488 void flush_cache_range(struct vm_area_struct *vma, 561 489 unsigned long start, unsigned long end) 562 490 { 563 - int sr3; 564 - 565 491 BUG_ON(!vma->vm_mm->context); 566 492 567 - sr3 = mfsp(3); 568 - if (vma->vm_mm->context == sr3) { 569 - flush_user_dcache_range(start,end); 570 - flush_user_icache_range(start,end); 493 + if ((end - start) < parisc_cache_flush_threshold) { 494 + if (vma->vm_mm->context == mfsp(3)) { 495 + flush_user_dcache_range_asm(start, end); 496 + if (vma->vm_flags & VM_EXEC) 497 + flush_user_icache_range_asm(start, end); 498 + } else { 499 + unsigned long addr; 500 + pgd_t *pgd = vma->vm_mm->pgd; 501 + 502 + for (addr = start & PAGE_MASK; addr < end; 503 + addr += PAGE_SIZE) { 504 + pte_t *ptep = get_ptep(pgd, addr); 505 + if (ptep != NULL) { 506 + pte_t pte = *ptep; 507 + flush_cache_page(vma, 508 + addr, pte_pfn(pte)); 509 + } 510 + } 511 + } 571 512 } else { 513 + #ifdef CONFIG_SMP 572 514 flush_cache_all(); 515 + #else 516 + flush_cache_all_local(); 517 + #endif 573 518 } 574 519 } 575 520 ··· 598 511 
__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn))); 599 512 600 513 } 514 + 515 + #ifdef CONFIG_PARISC_TMPALIAS 516 + 517 + void clear_user_highpage(struct page *page, unsigned long vaddr) 518 + { 519 + void *vto; 520 + unsigned long flags; 521 + 522 + /* Clear using TMPALIAS region. The page doesn't need to 523 + be flushed but the kernel mapping needs to be purged. */ 524 + 525 + vto = kmap_atomic(page, KM_USER0); 526 + 527 + /* The PA-RISC 2.0 Architecture book states on page F-6: 528 + "Before a write-capable translation is enabled, *all* 529 + non-equivalently-aliased translations must be removed 530 + from the page table and purged from the TLB. (Note 531 + that the caches are not required to be flushed at this 532 + time.) Before any non-equivalent aliased translation 533 + is re-enabled, the virtual address range for the writeable 534 + page (the entire page) must be flushed from the cache, 535 + and the write-capable translation removed from the page 536 + table and purged from the TLB." */ 537 + 538 + purge_kernel_dcache_page_asm((unsigned long)vto); 539 + purge_tlb_start(flags); 540 + pdtlb_kernel(vto); 541 + purge_tlb_end(flags); 542 + preempt_disable(); 543 + clear_user_page_asm(vto, vaddr); 544 + preempt_enable(); 545 + 546 + pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ 547 + } 548 + 549 + void copy_user_highpage(struct page *to, struct page *from, 550 + unsigned long vaddr, struct vm_area_struct *vma) 551 + { 552 + void *vfrom, *vto; 553 + unsigned long flags; 554 + 555 + /* Copy using TMPALIAS region. This has the advantage 556 + that the `from' page doesn't need to be flushed. However, 557 + the `to' page must be flushed in copy_user_page_asm since 558 + it can be used to bring in executable code. 
*/ 559 + 560 + vfrom = kmap_atomic(from, KM_USER0); 561 + vto = kmap_atomic(to, KM_USER1); 562 + 563 + purge_kernel_dcache_page_asm((unsigned long)vto); 564 + purge_tlb_start(flags); 565 + pdtlb_kernel(vto); 566 + pdtlb_kernel(vfrom); 567 + purge_tlb_end(flags); 568 + preempt_disable(); 569 + copy_user_page_asm(vto, vfrom, vaddr); 570 + flush_dcache_page_asm(__pa(vto), vaddr); 571 + preempt_enable(); 572 + 573 + pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */ 574 + pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ 575 + } 576 + 577 + #endif /* CONFIG_PARISC_TMPALIAS */
+2 -2
arch/parisc/kernel/entry.S
··· 483 483 * B <-> _PAGE_DMB (memory break) 484 484 * 485 485 * Then incredible subtlety: The access rights are 486 - * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ 486 + * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE 487 487 * See 3-14 of the parisc 2.0 manual 488 488 * 489 489 * Finally, _PAGE_READ goes in the top bit of PL1 (so we ··· 493 493 494 494 /* PAGE_USER indicates the page can be read with user privileges, 495 495 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 496 - * contains _PAGE_READ */ 496 + * contains _PAGE_READ) */ 497 497 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 498 498 depdi 7,11,3,\prot 499 499 /* If we're a gateway page, drop PL2 back to zero for promotion
+2
arch/parisc/kernel/inventory.c
··· 186 186 187 187 if (status != PDC_OK) { 188 188 /* no more cell modules or error */ 189 + kfree(pa_pdc_cell); 189 190 return status; 190 191 } 191 192 192 193 temp = pa_pdc_cell->cba; 193 194 dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path)); 194 195 if (!dev) { 196 + kfree(pa_pdc_cell); 195 197 return PDC_OK; 196 198 } 197 199
+2 -2
arch/parisc/kernel/irq.c
··· 379 379 static struct irqaction timer_action = { 380 380 .handler = timer_interrupt, 381 381 .name = "timer", 382 - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL, 382 + .flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL, 383 383 }; 384 384 385 385 #ifdef CONFIG_SMP 386 386 static struct irqaction ipi_action = { 387 387 .handler = ipi_interrupt, 388 388 .name = "IPI", 389 - .flags = IRQF_DISABLED | IRQF_PERCPU, 389 + .flags = IRQF_PERCPU, 390 390 }; 391 391 #endif 392 392
+290 -45
arch/parisc/kernel/pacache.S
··· 199 199 .callinfo NO_CALLS 200 200 .entry 201 201 202 - mtsp %r0, %sr1 203 202 load32 cache_info, %r1 204 203 205 204 /* Flush Instruction Cache */ ··· 207 208 LDREG ICACHE_STRIDE(%r1), %arg1 208 209 LDREG ICACHE_COUNT(%r1), %arg2 209 210 LDREG ICACHE_LOOP(%r1), %arg3 210 - rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/ 211 + rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/ 212 + mtsp %r0, %sr1 211 213 addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */ 212 214 movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */ 213 215 ··· 220 220 addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */ 221 221 222 222 fioneloop: /* Loop if LOOP = 1 */ 223 - addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */ 223 + /* Some implementations may flush with a single fice instruction */ 224 + cmpib,COND(>>=),n 15, %arg2, fioneloop2 225 + 226 + fioneloop1: 227 + fice,m %arg1(%sr1, %arg0) 228 + fice,m %arg1(%sr1, %arg0) 229 + fice,m %arg1(%sr1, %arg0) 230 + fice,m %arg1(%sr1, %arg0) 231 + fice,m %arg1(%sr1, %arg0) 232 + fice,m %arg1(%sr1, %arg0) 233 + fice,m %arg1(%sr1, %arg0) 234 + fice,m %arg1(%sr1, %arg0) 235 + fice,m %arg1(%sr1, %arg0) 236 + fice,m %arg1(%sr1, %arg0) 237 + fice,m %arg1(%sr1, %arg0) 238 + fice,m %arg1(%sr1, %arg0) 239 + fice,m %arg1(%sr1, %arg0) 240 + fice,m %arg1(%sr1, %arg0) 241 + fice,m %arg1(%sr1, %arg0) 242 + addib,COND(>) -16, %arg2, fioneloop1 243 + fice,m %arg1(%sr1, %arg0) 244 + 245 + /* Check if done */ 246 + cmpb,COND(=),n %arg2, %r0, fisync /* Predict branch taken */ 247 + 248 + fioneloop2: 249 + addib,COND(>) -1, %arg2, fioneloop2 /* Outer loop count decr */ 224 250 fice,m %arg1(%sr1, %arg0) /* Fice for one loop */ 225 251 226 252 fisync: ··· 266 240 .callinfo NO_CALLS 267 241 .entry 268 242 269 - mtsp %r0, %sr1 270 - load32 cache_info, %r1 243 + load32 cache_info, %r1 271 244 272 245 /* Flush Data Cache */ 273 246 ··· 274 249 LDREG DCACHE_STRIDE(%r1), %arg1 275 250 LDREG DCACHE_COUNT(%r1), %arg2 276 251 LDREG DCACHE_LOOP(%r1), 
%arg3 277 - rsm PSW_SM_I, %r22 252 + rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/ 253 + mtsp %r0, %sr1 278 254 addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */ 279 255 movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */ 280 256 ··· 287 261 addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */ 288 262 289 263 fdoneloop: /* Loop if LOOP = 1 */ 290 - addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */ 264 + /* Some implementations may flush with a single fdce instruction */ 265 + cmpib,COND(>>=),n 15, %arg2, fdoneloop2 266 + 267 + fdoneloop1: 268 + fdce,m %arg1(%sr1, %arg0) 269 + fdce,m %arg1(%sr1, %arg0) 270 + fdce,m %arg1(%sr1, %arg0) 271 + fdce,m %arg1(%sr1, %arg0) 272 + fdce,m %arg1(%sr1, %arg0) 273 + fdce,m %arg1(%sr1, %arg0) 274 + fdce,m %arg1(%sr1, %arg0) 275 + fdce,m %arg1(%sr1, %arg0) 276 + fdce,m %arg1(%sr1, %arg0) 277 + fdce,m %arg1(%sr1, %arg0) 278 + fdce,m %arg1(%sr1, %arg0) 279 + fdce,m %arg1(%sr1, %arg0) 280 + fdce,m %arg1(%sr1, %arg0) 281 + fdce,m %arg1(%sr1, %arg0) 282 + fdce,m %arg1(%sr1, %arg0) 283 + addib,COND(>) -16, %arg2, fdoneloop1 284 + fdce,m %arg1(%sr1, %arg0) 285 + 286 + /* Check if done */ 287 + cmpb,COND(=),n %arg2, %r0, fdsync /* Predict branch taken */ 288 + 289 + fdoneloop2: 290 + addib,COND(>) -1, %arg2, fdoneloop2 /* Outer loop count decr */ 291 291 fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */ 292 292 293 293 fdsync: ··· 329 277 330 278 .align 16 331 279 332 - ENTRY(copy_user_page_asm) 280 + /* Macros to serialize TLB purge operations on SMP. 
*/ 281 + 282 + .macro tlb_lock la,flags,tmp 283 + #ifdef CONFIG_SMP 284 + ldil L%pa_tlb_lock,%r1 285 + ldo R%pa_tlb_lock(%r1),\la 286 + rsm PSW_SM_I,\flags 287 + 1: LDCW 0(\la),\tmp 288 + cmpib,<>,n 0,\tmp,3f 289 + 2: ldw 0(\la),\tmp 290 + cmpb,<> %r0,\tmp,1b 291 + nop 292 + b,n 2b 293 + 3: 294 + #endif 295 + .endm 296 + 297 + .macro tlb_unlock la,flags,tmp 298 + #ifdef CONFIG_SMP 299 + ldi 1,\tmp 300 + stw \tmp,0(\la) 301 + mtsm \flags 302 + #endif 303 + .endm 304 + 305 + /* Clear page using kernel mapping. */ 306 + 307 + ENTRY(clear_page_asm) 308 + .proc 309 + .callinfo NO_CALLS 310 + .entry 311 + 312 + #ifdef CONFIG_64BIT 313 + 314 + /* Unroll the loop. */ 315 + ldi (PAGE_SIZE / 128), %r1 316 + 317 + 1: 318 + std %r0, 0(%r26) 319 + std %r0, 8(%r26) 320 + std %r0, 16(%r26) 321 + std %r0, 24(%r26) 322 + std %r0, 32(%r26) 323 + std %r0, 40(%r26) 324 + std %r0, 48(%r26) 325 + std %r0, 56(%r26) 326 + std %r0, 64(%r26) 327 + std %r0, 72(%r26) 328 + std %r0, 80(%r26) 329 + std %r0, 88(%r26) 330 + std %r0, 96(%r26) 331 + std %r0, 104(%r26) 332 + std %r0, 112(%r26) 333 + std %r0, 120(%r26) 334 + 335 + /* Note reverse branch hint for addib is taken. */ 336 + addib,COND(>),n -1, %r1, 1b 337 + ldo 128(%r26), %r26 338 + 339 + #else 340 + 341 + /* 342 + * Note that until (if) we start saving the full 64-bit register 343 + * values on interrupt, we can't use std on a 32 bit kernel. 
344 + */ 345 + ldi (PAGE_SIZE / 64), %r1 346 + 347 + 1: 348 + stw %r0, 0(%r26) 349 + stw %r0, 4(%r26) 350 + stw %r0, 8(%r26) 351 + stw %r0, 12(%r26) 352 + stw %r0, 16(%r26) 353 + stw %r0, 20(%r26) 354 + stw %r0, 24(%r26) 355 + stw %r0, 28(%r26) 356 + stw %r0, 32(%r26) 357 + stw %r0, 36(%r26) 358 + stw %r0, 40(%r26) 359 + stw %r0, 44(%r26) 360 + stw %r0, 48(%r26) 361 + stw %r0, 52(%r26) 362 + stw %r0, 56(%r26) 363 + stw %r0, 60(%r26) 364 + 365 + addib,COND(>),n -1, %r1, 1b 366 + ldo 64(%r26), %r26 367 + #endif 368 + bv %r0(%r2) 369 + nop 370 + .exit 371 + 372 + .procend 373 + ENDPROC(clear_page_asm) 374 + 375 + /* Copy page using kernel mapping. */ 376 + 377 + ENTRY(copy_page_asm) 333 378 .proc 334 379 .callinfo NO_CALLS 335 380 .entry ··· 434 285 #ifdef CONFIG_64BIT 435 286 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. 436 287 * Unroll the loop by hand and arrange insn appropriately. 437 - * GCC probably can do this just as well. 288 + * Prefetch doesn't improve performance on rp3440. 289 + * GCC probably can do this just as well... 438 290 */ 439 291 440 - ldd 0(%r25), %r19 441 292 ldi (PAGE_SIZE / 128), %r1 442 293 443 - ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */ 444 - ldw 128(%r25), %r0 /* prefetch 2 */ 445 - 446 - 1: ldd 8(%r25), %r20 447 - ldw 192(%r25), %r0 /* prefetch 3 */ 448 - ldw 256(%r25), %r0 /* prefetch 4 */ 294 + 1: ldd 0(%r25), %r19 295 + ldd 8(%r25), %r20 449 296 450 297 ldd 16(%r25), %r21 451 298 ldd 24(%r25), %r22 ··· 475 330 476 331 ldd 112(%r25), %r21 477 332 ldd 120(%r25), %r22 333 + ldo 128(%r25), %r25 478 334 std %r19, 96(%r26) 479 335 std %r20, 104(%r26) 480 336 481 - ldo 128(%r25), %r25 482 337 std %r21, 112(%r26) 483 338 std %r22, 120(%r26) 484 - ldo 128(%r26), %r26 485 339 486 - /* conditional branches nullify on forward taken branch, and on 487 - * non-taken backward branch. Note that .+4 is a backwards branch. 488 - * The ldd should only get executed if the branch is taken. 
489 - */ 490 - addib,COND(>),n -1, %r1, 1b /* bundle 10 */ 491 - ldd 0(%r25), %r19 /* start next loads */ 340 + /* Note reverse branch hint for addib is taken. */ 341 + addib,COND(>),n -1, %r1, 1b 342 + ldo 128(%r26), %r26 492 343 493 344 #else 494 345 ··· 540 399 .exit 541 400 542 401 .procend 543 - ENDPROC(copy_user_page_asm) 402 + ENDPROC(copy_page_asm) 544 403 545 404 /* 546 405 * NOTE: Code in clear_user_page has a hard coded dependency on the ··· 563 422 * %r23 physical page (shifted for tlb insert) of "from" translation 564 423 */ 565 424 566 - #if 0 567 - 568 425 /* 569 426 * We can't do this since copy_user_page is used to bring in 570 427 * file data that might have instructions. Since the data would ··· 574 435 * use it if more information is passed into copy_user_page(). 575 436 * Have to do some measurements to see if it is worthwhile to 576 437 * lobby for such a change. 438 + * 577 439 */ 578 440 579 441 ENTRY(copy_user_page_asm) ··· 582 442 .callinfo NO_CALLS 583 443 .entry 584 444 445 + /* Convert virtual `to' and `from' addresses to physical addresses. 446 + Move `from' physical address to non shadowed register. 
*/ 585 447 ldil L%(__PAGE_OFFSET), %r1 586 448 sub %r26, %r1, %r26 587 - sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */ 449 + sub %r25, %r1, %r23 588 450 589 451 ldil L%(TMPALIAS_MAP_START), %r28 590 452 /* FIXME for different page sizes != 4k */ 591 453 #ifdef CONFIG_64BIT 592 - extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */ 593 - extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */ 594 - depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ 454 + #if (TMPALIAS_MAP_START >= 0x80000000) 455 + depdi 0, 31,32, %r28 /* clear any sign extension */ 456 + #endif 457 + extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */ 458 + extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */ 459 + depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ 595 460 depdi 0, 63,12, %r28 /* Clear any offset bits */ 596 461 copy %r28, %r29 597 462 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ ··· 611 466 612 467 /* Purge any old translations */ 613 468 469 + #ifdef CONFIG_PA20 470 + pdtlb,l 0(%r28) 471 + pdtlb,l 0(%r29) 472 + #else 473 + tlb_lock %r20,%r21,%r22 614 474 pdtlb 0(%r28) 615 475 pdtlb 0(%r29) 476 + tlb_unlock %r20,%r21,%r22 477 + #endif 616 478 617 - ldi 64, %r1 479 + #ifdef CONFIG_64BIT 480 + /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. 481 + * Unroll the loop by hand and arrange insn appropriately. 482 + * GCC probably can do this just as well. 
483 + */ 484 + 485 + ldd 0(%r29), %r19 486 + ldi (PAGE_SIZE / 128), %r1 487 + 488 + 1: ldd 8(%r29), %r20 489 + 490 + ldd 16(%r29), %r21 491 + ldd 24(%r29), %r22 492 + std %r19, 0(%r28) 493 + std %r20, 8(%r28) 494 + 495 + ldd 32(%r29), %r19 496 + ldd 40(%r29), %r20 497 + std %r21, 16(%r28) 498 + std %r22, 24(%r28) 499 + 500 + ldd 48(%r29), %r21 501 + ldd 56(%r29), %r22 502 + std %r19, 32(%r28) 503 + std %r20, 40(%r28) 504 + 505 + ldd 64(%r29), %r19 506 + ldd 72(%r29), %r20 507 + std %r21, 48(%r28) 508 + std %r22, 56(%r28) 509 + 510 + ldd 80(%r29), %r21 511 + ldd 88(%r29), %r22 512 + std %r19, 64(%r28) 513 + std %r20, 72(%r28) 514 + 515 + ldd 96(%r29), %r19 516 + ldd 104(%r29), %r20 517 + std %r21, 80(%r28) 518 + std %r22, 88(%r28) 519 + 520 + ldd 112(%r29), %r21 521 + ldd 120(%r29), %r22 522 + std %r19, 96(%r28) 523 + std %r20, 104(%r28) 524 + 525 + ldo 128(%r29), %r29 526 + std %r21, 112(%r28) 527 + std %r22, 120(%r28) 528 + ldo 128(%r28), %r28 529 + 530 + /* conditional branches nullify on forward taken branch, and on 531 + * non-taken backward branch. Note that .+4 is a backwards branch. 532 + * The ldd should only get executed if the branch is taken. 533 + */ 534 + addib,COND(>),n -1, %r1, 1b /* bundle 10 */ 535 + ldd 0(%r29), %r19 /* start next loads */ 536 + 537 + #else 538 + ldi (PAGE_SIZE / 64), %r1 618 539 619 540 /* 620 541 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw ··· 691 480 * use ldd/std on a 32 bit kernel. 
692 481 */ 693 482 694 - 695 - 1: 696 - ldw 0(%r29), %r19 483 + 1: ldw 0(%r29), %r19 697 484 ldw 4(%r29), %r20 698 485 ldw 8(%r29), %r21 699 486 ldw 12(%r29), %r22 ··· 724 515 stw %r21, 56(%r28) 725 516 stw %r22, 60(%r28) 726 517 ldo 64(%r28), %r28 518 + 727 519 addib,COND(>) -1, %r1,1b 728 520 ldo 64(%r29), %r29 521 + #endif 729 522 730 523 bv %r0(%r2) 731 524 nop ··· 735 524 736 525 .procend 737 526 ENDPROC(copy_user_page_asm) 738 - #endif 739 527 740 - ENTRY(__clear_user_page_asm) 528 + ENTRY(clear_user_page_asm) 741 529 .proc 742 530 .callinfo NO_CALLS 743 531 .entry ··· 760 550 761 551 /* Purge any old translation */ 762 552 553 + #ifdef CONFIG_PA20 554 + pdtlb,l 0(%r28) 555 + #else 556 + tlb_lock %r20,%r21,%r22 763 557 pdtlb 0(%r28) 558 + tlb_unlock %r20,%r21,%r22 559 + #endif 764 560 765 561 #ifdef CONFIG_64BIT 766 562 ldi (PAGE_SIZE / 128), %r1 ··· 796 580 #else /* ! CONFIG_64BIT */ 797 581 ldi (PAGE_SIZE / 64), %r1 798 582 799 - 1: 800 - stw %r0, 0(%r28) 583 + 1: stw %r0, 0(%r28) 801 584 stw %r0, 4(%r28) 802 585 stw %r0, 8(%r28) 803 586 stw %r0, 12(%r28) ··· 821 606 .exit 822 607 823 608 .procend 824 - ENDPROC(__clear_user_page_asm) 609 + ENDPROC(clear_user_page_asm) 825 610 826 611 ENTRY(flush_dcache_page_asm) 827 612 .proc ··· 845 630 846 631 /* Purge any old translation */ 847 632 633 + #ifdef CONFIG_PA20 634 + pdtlb,l 0(%r28) 635 + #else 636 + tlb_lock %r20,%r21,%r22 848 637 pdtlb 0(%r28) 638 + tlb_unlock %r20,%r21,%r22 639 + #endif 849 640 850 641 ldil L%dcache_stride, %r1 851 642 ldw R%dcache_stride(%r1), %r1 ··· 884 663 fdc,m %r1(%r28) 885 664 886 665 sync 666 + 667 + #ifdef CONFIG_PA20 668 + pdtlb,l 0(%r25) 669 + #else 670 + tlb_lock %r20,%r21,%r22 671 + pdtlb 0(%r25) 672 + tlb_unlock %r20,%r21,%r22 673 + #endif 674 + 887 675 bv %r0(%r2) 888 - pdtlb (%r25) 676 + nop 889 677 .exit 890 678 891 679 .procend ··· 922 692 923 693 /* Purge any old translation */ 924 694 925 - pitlb (%sr4,%r28) 695 + #ifdef CONFIG_PA20 696 + pitlb,l %r0(%sr4,%r28) 697 + 
#else 698 + tlb_lock %r20,%r21,%r22 699 + pitlb (%sr4,%r28) 700 + tlb_unlock %r20,%r21,%r22 701 + #endif 926 702 927 703 ldil L%icache_stride, %r1 928 704 ldw R%icache_stride(%r1), %r1 ··· 963 727 fic,m %r1(%sr4,%r28) 964 728 965 729 sync 730 + 731 + #ifdef CONFIG_PA20 732 + pitlb,l %r0(%sr4,%r25) 733 + #else 734 + tlb_lock %r20,%r21,%r22 735 + pitlb (%sr4,%r25) 736 + tlb_unlock %r20,%r21,%r22 737 + #endif 738 + 966 739 bv %r0(%r2) 967 - pitlb (%sr4,%r25) 740 + nop 968 741 .exit 969 742 970 743 .procend ··· 1022 777 .procend 1023 778 ENDPROC(flush_kernel_dcache_page_asm) 1024 779 1025 - ENTRY(purge_kernel_dcache_page) 780 + ENTRY(purge_kernel_dcache_page_asm) 1026 781 .proc 1027 782 .callinfo NO_CALLS 1028 783 .entry ··· 1062 817 .exit 1063 818 1064 819 .procend 1065 - ENDPROC(purge_kernel_dcache_page) 820 + ENDPROC(purge_kernel_dcache_page_asm) 1066 821 1067 822 ENTRY(flush_user_dcache_range_asm) 1068 823 .proc
+3 -2
arch/parisc/kernel/parisc_ksyms.c
··· 157 157 EXPORT_SYMBOL(_mcount); 158 158 #endif 159 159 160 - /* from pacache.S -- needed for copy_page */ 161 - EXPORT_SYMBOL(copy_user_page_asm); 160 + /* from pacache.S -- needed for clear/copy_page */ 161 + EXPORT_SYMBOL(clear_page_asm); 162 + EXPORT_SYMBOL(copy_page_asm);
+1 -1
arch/parisc/kernel/signal.c
··· 312 312 #if DEBUG_SIG 313 313 /* Assert that we're flushing in the correct space... */ 314 314 { 315 - int sid; 315 + unsigned long sid; 316 316 asm ("mfsp %%sr3,%0" : "=r" (sid)); 317 317 DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n", 318 318 sid, frame->tramp);
+11 -4
arch/parisc/kernel/signal32.c
··· 65 65 { 66 66 compat_sigset_t s; 67 67 68 - if (sz != sizeof *set) 68 + if (sz != sizeof(compat_sigset_t)) 69 69 return -EINVAL; 70 70 sigset_64to32(&s, set); 71 71 ··· 78 78 compat_sigset_t s; 79 79 int r; 80 80 81 - if (sz != sizeof *set) 81 + if (sz != sizeof(compat_sigset_t)) 82 82 return -EINVAL; 83 83 84 84 if ((r = copy_from_user(&s, up, sz)) == 0) { ··· 94 94 sigset_t old_set, new_set; 95 95 int ret; 96 96 97 - if (set && get_sigset32(set, &new_set, sigsetsize)) 98 - return -EFAULT; 97 + if (set) { 98 + ret = get_sigset32(set, &new_set, sigsetsize); 99 + if (ret) 100 + return ret; 101 + } 99 102 100 103 KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL, 101 104 oset ? (sigset_t __user *)&old_set : NULL, sigsetsize); ··· 130 127 struct k_sigaction32 new_sa32, old_sa32; 131 128 struct k_sigaction new_sa, old_sa; 132 129 int ret = -EINVAL; 130 + 131 + /* XXX: Don't preclude handling different sized sigset_t's. */ 132 + if (sigsetsize != sizeof(compat_sigset_t)) 133 + return -EINVAL; 133 134 134 135 if (act) { 135 136 if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
+12 -4
arch/parisc/kernel/sys_parisc.c
··· 94 94 { 95 95 if (len > TASK_SIZE) 96 96 return -ENOMEM; 97 - /* Might want to check for cache aliasing issues for MAP_FIXED case 98 - * like ARM or MIPS ??? --BenH. 99 - */ 100 - if (flags & MAP_FIXED) 97 + if (flags & MAP_FIXED) { 98 + if ((flags & MAP_SHARED) && 99 + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) 100 + return -EINVAL; 101 101 return addr; 102 + } 102 103 if (!addr) 103 104 addr = TASK_UNMAPPED_BASE; 104 105 ··· 211 210 { 212 211 return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off, 213 212 (loff_t)hi_nbytes << 32 | lo_nbytes, flags); 213 + } 214 + 215 + asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo, 216 + u32 lenhi, u32 lenlo) 217 + { 218 + return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo, 219 + ((u64)lenhi << 32) | lenlo); 214 220 } 215 221 216 222 asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
+13 -109
arch/parisc/kernel/sys_parisc32.c
··· 21 21 #include <linux/time.h> 22 22 #include <linux/smp.h> 23 23 #include <linux/sem.h> 24 - #include <linux/msg.h> 25 24 #include <linux/shm.h> 26 25 #include <linux/slab.h> 27 26 #include <linux/uio.h> ··· 60 61 return -ENOSYS; 61 62 } 62 63 63 - asmlinkage long sys32_sched_rr_get_interval(pid_t pid, 64 - struct compat_timespec __user *interval) 64 + /* Note: it is necessary to treat out_fd and in_fd as unsigned ints, with the 65 + * corresponding cast to a signed int to insure that the proper conversion 66 + * (sign extension) between the register representation of a signed int (msr in 67 + * 32-bit mode) and the register representation of a signed int (msr in 64-bit 68 + * mode) is performed. 69 + */ 70 + asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, 71 + compat_off_t __user *offset, compat_size_t count) 65 72 { 66 - struct timespec t; 67 - int ret; 68 - 69 - KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid, (struct timespec __user *)&t); 70 - if (put_compat_timespec(&t, interval)) 71 - return -EFAULT; 72 - return ret; 73 + return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count); 73 74 } 74 75 75 - struct msgbuf32 { 76 - int mtype; 77 - char mtext[1]; 78 - }; 79 - 80 - asmlinkage long sys32_msgsnd(int msqid, 81 - struct msgbuf32 __user *umsgp32, 82 - size_t msgsz, int msgflg) 76 + asmlinkage long sys32_sendfile64(u32 out_fd, u32 in_fd, 77 + compat_loff_t __user *offset, compat_size_t count) 83 78 { 84 - struct msgbuf *mb; 85 - struct msgbuf32 mb32; 86 - int err; 87 - 88 - if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL) 89 - return -ENOMEM; 90 - 91 - err = get_user(mb32.mtype, &umsgp32->mtype); 92 - mb->mtype = mb32.mtype; 93 - err |= copy_from_user(mb->mtext, &umsgp32->mtext, msgsz); 94 - 95 - if (err) 96 - err = -EFAULT; 97 - else 98 - KERNEL_SYSCALL(err, sys_msgsnd, msqid, (struct msgbuf __user *)mb, msgsz, msgflg); 99 - 100 - kfree(mb); 101 - return err; 102 - } 103 - 104 - asmlinkage long sys32_msgrcv(int msqid, 105 
- struct msgbuf32 __user *umsgp32, 106 - size_t msgsz, long msgtyp, int msgflg) 107 - { 108 - struct msgbuf *mb; 109 - struct msgbuf32 mb32; 110 - int err, len; 111 - 112 - if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL) 113 - return -ENOMEM; 114 - 115 - KERNEL_SYSCALL(err, sys_msgrcv, msqid, (struct msgbuf __user *)mb, msgsz, msgtyp, msgflg); 116 - 117 - if (err >= 0) { 118 - len = err; 119 - mb32.mtype = mb->mtype; 120 - err = put_user(mb32.mtype, &umsgp32->mtype); 121 - err |= copy_to_user(&umsgp32->mtext, mb->mtext, len); 122 - if (err) 123 - err = -EFAULT; 124 - else 125 - err = len; 126 - } 127 - 128 - kfree(mb); 129 - return err; 130 - } 131 - 132 - asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count) 133 - { 134 - mm_segment_t old_fs = get_fs(); 135 - int ret; 136 - off_t of; 137 - 138 - if (offset && get_user(of, offset)) 139 - return -EFAULT; 140 - 141 - set_fs(KERNEL_DS); 142 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count); 143 - set_fs(old_fs); 144 - 145 - if (offset && put_user(of, offset)) 146 - return -EFAULT; 147 - 148 - return ret; 149 - } 150 - 151 - asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count) 152 - { 153 - mm_segment_t old_fs = get_fs(); 154 - int ret; 155 - loff_t lof; 156 - 157 - if (offset && get_user(lof, offset)) 158 - return -EFAULT; 159 - 160 - set_fs(KERNEL_DS); 161 - ret = sys_sendfile64(out_fd, in_fd, offset ? 
(loff_t __user *)&lof : NULL, count); 162 - set_fs(old_fs); 163 - 164 - if (offset && put_user(lof, offset)) 165 - return -EFAULT; 166 - 167 - return ret; 79 + return sys_sendfile64((int)out_fd, (int)in_fd, 80 + (loff_t __user *)offset, count); 168 81 } 169 82 170 83 ··· 109 198 { 110 199 return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low, 111 200 buf, len); 112 - } 113 - 114 - asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo, 115 - u32 lenhi, u32 lenlo) 116 - { 117 - return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo, 118 - ((loff_t)lenhi << 32) | lenlo); 119 201 } 120 202 121 203 asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
+4 -1
arch/parisc/kernel/syscall.S
··· 309 309 LDREG TASK_PT_GR25(%r1), %r25 310 310 LDREG TASK_PT_GR24(%r1), %r24 311 311 LDREG TASK_PT_GR23(%r1), %r23 312 - #ifdef CONFIG_64BIT 313 312 LDREG TASK_PT_GR22(%r1), %r22 314 313 LDREG TASK_PT_GR21(%r1), %r21 314 + #ifdef CONFIG_64BIT 315 315 ldo -16(%r30),%r29 /* Reference param save area */ 316 + #else 317 + stw %r22, -52(%r30) /* 5th argument */ 318 + stw %r21, -56(%r30) /* 6th argument */ 316 319 #endif 317 320 318 321 comiclr,>>= __NR_Linux_syscalls, %r20, %r0
+14 -13
arch/parisc/kernel/syscall_table.S
··· 247 247 ENTRY_SAME(sched_yield) 248 248 ENTRY_SAME(sched_get_priority_max) 249 249 ENTRY_SAME(sched_get_priority_min) /* 160 */ 250 - /* These 2 would've worked if someone had defined struct timespec 251 - * carefully, like timeval for example (which is about the same). 252 - * Unfortunately it contains a long :-( */ 253 - ENTRY_DIFF(sched_rr_get_interval) 250 + ENTRY_COMP(sched_rr_get_interval) 254 251 ENTRY_COMP(nanosleep) 255 252 ENTRY_SAME(mremap) 256 253 ENTRY_SAME(setresuid) ··· 283 286 ENTRY_SAME(semop) /* 185 */ 284 287 ENTRY_SAME(semget) 285 288 ENTRY_DIFF(semctl) 286 - ENTRY_DIFF(msgsnd) 287 - ENTRY_DIFF(msgrcv) 289 + ENTRY_COMP(msgsnd) 290 + ENTRY_COMP(msgrcv) 288 291 ENTRY_SAME(msgget) /* 190 */ 289 292 ENTRY_SAME(msgctl) 290 293 ENTRY_SAME(shmat) ··· 304 307 ENTRY_SAME(gettid) 305 308 ENTRY_OURS(readahead) 306 309 ENTRY_SAME(tkill) 307 - ENTRY_SAME(sendfile64) 310 + ENTRY_DIFF(sendfile64) 308 311 ENTRY_COMP(futex) /* 210 */ 309 312 ENTRY_COMP(sched_setaffinity) 310 313 ENTRY_COMP(sched_getaffinity) ··· 324 327 ENTRY_SAME(epoll_wait) 325 328 ENTRY_SAME(remap_file_pages) 326 329 ENTRY_SAME(semtimedop) 327 - ENTRY_SAME(mq_open) 330 + ENTRY_COMP(mq_open) 328 331 ENTRY_SAME(mq_unlink) /* 230 */ 329 - ENTRY_SAME(mq_timedsend) 330 - ENTRY_SAME(mq_timedreceive) 331 - ENTRY_SAME(mq_notify) 332 - ENTRY_SAME(mq_getsetattr) 332 + ENTRY_COMP(mq_timedsend) 333 + ENTRY_COMP(mq_timedreceive) 334 + ENTRY_COMP(mq_notify) 335 + ENTRY_COMP(mq_getsetattr) 333 336 ENTRY_COMP(waitid) /* 235 */ 334 337 ENTRY_OURS(fadvise64_64) 335 338 ENTRY_SAME(set_tid_address) ··· 400 403 ENTRY_COMP(signalfd) 401 404 ENTRY_SAME(ni_syscall) /* was timerfd */ 402 405 ENTRY_SAME(eventfd) 403 - ENTRY_COMP(fallocate) /* 305 */ 406 + ENTRY_OURS(fallocate) /* 305 */ 404 407 ENTRY_SAME(timerfd_create) 405 408 ENTRY_COMP(timerfd_settime) 406 409 ENTRY_COMP(timerfd_gettime) ··· 425 428 ENTRY_SAME(syncfs) 426 429 ENTRY_SAME(setns) 427 430 ENTRY_COMP(sendmmsg) 431 + ENTRY_COMP(process_vm_readv) /* 
330 */ 432 + ENTRY_COMP(process_vm_writev) 433 + ENTRY_SAME(kcmp) 434 + ENTRY_SAME(finit_module) 428 435 429 436 /* Nothing yet */ 430 437
+25 -5
arch/parisc/mm/fault.c
··· 175 175 struct mm_struct *mm = tsk->mm; 176 176 unsigned long acc_type; 177 177 int fault; 178 + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 178 179 179 180 if (in_atomic() || !mm) 180 181 goto no_context; 181 182 183 + retry: 182 184 down_read(&mm->mmap_sem); 183 185 vma = find_vma_prev(mm, address, &prev_vma); 184 186 if (!vma || address < vma->vm_start) ··· 203 201 * fault. 204 202 */ 205 203 206 - fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0); 204 + fault = handle_mm_fault(mm, vma, address, 205 + flags | ((acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0)); 206 + 207 + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 208 + return; 209 + 207 210 if (unlikely(fault & VM_FAULT_ERROR)) { 208 211 /* 209 212 * We hit a shared mapping outside of the file, or some ··· 221 214 goto bad_area; 222 215 BUG(); 223 216 } 224 - if (fault & VM_FAULT_MAJOR) 225 - current->maj_flt++; 226 - else 227 - current->min_flt++; 217 + if (flags & FAULT_FLAG_ALLOW_RETRY) { 218 + if (fault & VM_FAULT_MAJOR) 219 + current->maj_flt++; 220 + else 221 + current->min_flt++; 222 + if (fault & VM_FAULT_RETRY) { 223 + flags &= ~FAULT_FLAG_ALLOW_RETRY; 224 + 225 + /* 226 + * No need to up_read(&mm->mmap_sem) as we would 227 + * have already released it in __lock_page_or_retry 228 + * in mm/filemap.c. 229 + */ 230 + 231 + goto retry; 232 + } 233 + } 228 234 up_read(&mm->mmap_sem); 229 235 return; 230 236
+1
drivers/parisc/Kconfig
··· 128 128 config CHASSIS_LCD_LED 129 129 bool "Chassis LCD and LED support" 130 130 default y 131 + select VM_EVENT_COUNTERS 131 132 help 132 133 Say Y here if you want to enable support for the Heartbeat, 133 134 Disk/Network activities LEDs on some PA-RISC machines,
+5 -8
drivers/parisc/dino.c
··· 580 580 581 581 } 582 582 583 - DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n", 583 + DBG("DEBUG %s assigning %d [%pR]\n", 584 584 dev_name(&bus->self->dev), i, 585 - bus->self->resource[i].start, 586 - bus->self->resource[i].end); 585 + &bus->self->resource[i]); 587 586 WARN_ON(pci_assign_resource(bus->self, i)); 588 - DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", 587 + DBG("DEBUG %s after assign %d [%pR]\n", 589 588 dev_name(&bus->self->dev), i, 590 - bus->self->resource[i].start, 591 - bus->self->resource[i].end); 589 + &bus->self->resource[i]); 592 590 } 593 591 } 594 592 ··· 770 772 result = ccio_request_resource(dino_dev->hba.dev, &res[i]); 771 773 if (result < 0) { 772 774 printk(KERN_ERR "%s: failed to claim PCI Bus address " 773 - "space %d (0x%lx-0x%lx)!\n", name, i, 774 - (unsigned long)res[i].start, (unsigned long)res[i].end); 775 + "space %d (%pR)!\n", name, i, &res[i]); 775 776 return result; 776 777 } 777 778 }
+2 -4
drivers/parisc/hppb.c
··· 74 74 75 75 status = ccio_request_resource(dev, &card->mmio_region); 76 76 if(status < 0) { 77 - printk(KERN_ERR "%s: failed to claim HP-PB " 78 - "bus space (0x%08llx, 0x%08llx)\n", 79 - __FILE__, (unsigned long long) card->mmio_region.start, 80 - (unsigned long long) card->mmio_region.end); 77 + printk(KERN_ERR "%s: failed to claim HP-PB bus space (%pR)\n", 78 + __FILE__, &card->mmio_region); 81 79 } 82 80 83 81 return 0;
+2 -4
drivers/parisc/pdc_stable.c
··· 212 212 entry, devpath, entry->addr); 213 213 214 214 /* addr, devpath and count must be word aligned */ 215 - if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) { 216 - printk(KERN_ERR "%s: an error occurred when writing to PDC.\n" 215 + if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) 216 + WARN(1, KERN_ERR "%s: an error occurred when writing to PDC.\n" 217 217 "It is likely that the Stable Storage data has been corrupted.\n" 218 218 "Please check it carefully upon next reboot.\n", __func__); 219 - WARN_ON(1); 220 - } 221 219 222 220 /* kobject is already registered */ 223 221 entry->ready = 2;
+1 -1
drivers/parisc/superio.c
··· 274 274 else 275 275 printk(KERN_ERR PFX "USB regulator not initialized!\n"); 276 276 277 - if (request_irq(pdev->irq, superio_interrupt, IRQF_DISABLED, 277 + if (request_irq(pdev->irq, superio_interrupt, 0, 278 278 SUPERIO, (void *)sio)) { 279 279 280 280 printk(KERN_ERR PFX "could not get irq\n");