Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6:
  [PARISC] Convert to new irq_chip functions
  [PARISC] fix per-cpu flag problem in the cpu affinity checkers
  [PARISC] fix vmap flush/invalidate
  eliminate special FLUSH flag from page table
  parisc: flush pages through tmpalias space
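
Most of the driver churn below is the mechanical side of the first change: every irq_chip callback moves from the old bare `unsigned int irq` signature to the new `struct irq_data *` one, and the function pointers gain an `irq_` prefix. A minimal compilable sketch of the pattern, using a hypothetical `demo` chip and simplified stand-ins for the kernel types (the real definitions live in <linux/irq.h>):

#include <stdio.h>

/* Simplified stand-in for the kernel's struct irq_data: it bundles the
 * interrupt number with the per-chip cookie that old-style callbacks
 * had to fetch separately with get_irq_chip_data(irq). */
struct irq_data {
	unsigned int irq;
	void *chip_data;
};

/* Old style: the raw interrupt number is the only argument. */
static void demo_mask_irq_old(unsigned int irq)
{
	printf("mask irq %u (chip data looked up separately)\n", irq);
}

/* New style: everything hangs off irq_data; real code uses
 * irq_data_get_irq_chip_data(d) rather than touching the field. */
static void demo_mask_irq_new(struct irq_data *d)
{
	printf("mask irq %u (chip data %p travels along)\n", d->irq, d->chip_data);
}

int main(void)
{
	struct irq_data d = { .irq = 16, .chip_data = NULL };

	demo_mask_irq_old(d.irq);	/* registered as: .mask     = demo_mask_irq_old */
	demo_mask_irq_new(&d);		/* becomes:       .irq_mask = demo_mask_irq_new */
	return 0;
}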

+390 -442
+1 -0
arch/parisc/Kconfig
···
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select IRQ_PER_CPU
+	select GENERIC_HARDIRQS_NO_DEPRECATED

 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
+21 -10
arch/parisc/include/asm/cacheflush.h
···
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
-void flush_user_dcache_page(unsigned long);
-void flush_user_icache_page(unsigned long);
 void flush_user_dcache_range(unsigned long, unsigned long);
 void flush_user_icache_range(unsigned long, unsigned long);
···
 void flush_cache_all_local(void);
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	flush_kernel_dcache_page_addr(page_address(page));
+}

 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
···
 }
 static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
+	unsigned long start = (unsigned long)vaddr;
+	void *cursor = vaddr;
+
+	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+		struct page *page = vmalloc_to_page(cursor);
+
+		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+			flush_kernel_dcache_page(page);
+	}
+	flush_kernel_dcache_range_asm(start, start + size);
 }

 #define flush_cache_vmap(start, end)		flush_cache_all()
···
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);

+/* defined in pacache.S exported in cache.c used by flush_anon_page */
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void
 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
 	if (PageAnon(page))
-		flush_user_dcache_page(vmaddr);
+		flush_dcache_page_asm(page_to_phys(page), vmaddr);
 }
-
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-void flush_kernel_dcache_page_addr(void *addr);
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-	flush_kernel_dcache_page_addr(page_address(page));
-}

 #ifdef CONFIG_DEBUG_RODATA
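
For context on the new invalidate_kernel_vmap_range() body above: this helper and its partner flush_kernel_vmap_range() exist so that I/O done against the physical pages of a vmalloc area stays coherent with the kernel's virtual alias (see Documentation/cachetlb.txt). A hedged sketch of the caller-side pairing, where demo_dma_to_device() and demo_dma_from_device() are hypothetical stand-ins for a driver's real DMA submission:

/* Sketch only: vmalloc/vfree and the two flush helpers are the real
 * kernel API; the demo_dma_* calls are placeholders. */
static int demo_vmap_io(size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return -ENOMEM;

	/* CPU filled the buffer through the vmap alias; write back
	 * dirty lines so the device reads current data from memory. */
	flush_kernel_vmap_range(buf, len);
	demo_dma_to_device(buf, len);

	/* Device wrote the buffer; drop stale cached lines before the
	 * CPU reads it back through the vmap alias. */
	demo_dma_from_device(buf, len);
	invalidate_kernel_vmap_range(buf, len);

	vfree(buf);
	return 0;
}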
+4 -9
arch/parisc/include/asm/irq.h
···
 }

 struct irq_chip;
+struct irq_data;

-/*
- * Some useful "we don't have to do anything here" handlers.  Should
- * probably be provided by the generic code.
- */
-void no_ack_irq(unsigned int irq);
-void no_end_irq(unsigned int irq);
-void cpu_ack_irq(unsigned int irq);
-void cpu_eoi_irq(unsigned int irq);
+void cpu_ack_irq(struct irq_data *d);
+void cpu_eoi_irq(struct irq_data *d);

 extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);
···
 extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);

 extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
-extern int cpu_check_affinity(unsigned int irq, const struct cpumask *dest);
+extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);

 /* soft power switch support (power.c) */
 extern struct tasklet_struct power_tasklet;
+4 -10
arch/parisc/include/asm/pgtable.h
···
 #define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
 #define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
 #define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
-#define _PAGE_FLUSH_BIT    21   /* (0x400) Software: translation valid */
-                                /*             for cache flushing only */
+/* bit 21 was formerly the FLUSH bit but is now unused */
 #define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

 /* N.B. The bits are defined in terms of a 32 bit word above, so the */
···
 #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
 #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
 #define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
-#define _PAGE_FLUSH    (1 << xlate_pabit(_PAGE_FLUSH_BIT))
 #define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
 #define _PAGE_FILE     (1 << xlate_pabit(_PAGE_FILE_BIT))
···
 #define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
 #define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
 #define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
-#define PAGE_FLUSH      __pgprot(_PAGE_FLUSH)

 /*
···
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

-#define pte_none(x)     ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
+#define pte_none(x)     (pte_val(x) == 0)
 #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
 #define pte_clear(mm,addr,xp)	do { pte_val(*(xp)) = 0; } while (0)
···
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t old_pte;
-	pte_t pte;

 	spin_lock(&pa_dbit_lock);
-	pte = old_pte = *ptep;
-	pte_val(pte) &= ~_PAGE_PRESENT;
-	pte_val(pte) |= _PAGE_FLUSH;
-	set_pte_at(mm,addr,ptep,pte);
+	old_pte = *ptep;
+	pte_clear(mm,addr,ptep);
 	spin_unlock(&pa_dbit_lock);

 	return old_pte;
+17 -92
arch/parisc/kernel/cache.c
···
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
+#include <asm/shmparam.h>

 int split_tlb __read_mostly;
 int dcache_stride __read_mostly;
 int icache_stride __read_mostly;
 EXPORT_SYMBOL(dcache_stride);
+
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


 /* On some machines (e.g. ones with the Merced bus), there can be
···
 		panic("SpaceID hashing is still on!\n");
 }

-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
-		unsigned long addr, unsigned long pfn)
-{
-	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
-	pmd_t *pmd;
-	pte_t pte;
-
-	if(pgd_none(*pgd))
-		return 0;
-
-	pmd = pmd_offset(pgd, addr);
-	if(pmd_none(*pmd) || pmd_bad(*pmd))
-		return 0;
-
-	/* We cannot take the pte lock here: flush_cache_page is usually
-	 * called with pte lock already held.  Whereas flush_dcache_page
-	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
-	 * the vma itself is secure, but the pte might come or go racily.
-	 */
-	pte = *pte_offset_map(pmd, addr);
-	/* But pte_unmap() does nothing on this architecture */
-
-	/* Filter out coincidental file entries and swap entries */
-	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
-		return 0;
-
-	return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process.  cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current.  We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
 static inline void
-flush_user_cache_page_non_current(struct vm_area_struct *vma,
-				  unsigned long vmaddr)
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+		   unsigned long physaddr)
 {
-	/* save the current process space and pgd */
-	unsigned long space = mfsp(3), pgd = mfctl(25);
-
-	/* we don't mind taking interrupts since they may not
-	 * do anything with user space, but we can't
-	 * be preempted here */
-	preempt_disable();
-
-	/* make us current */
-	mtctl(__pa(vma->vm_mm->pgd), 25);
-	mtsp(vma->vm_mm->context, 3);
-
-	flush_user_dcache_page(vmaddr);
-	if(vma->vm_flags & VM_EXEC)
-		flush_user_icache_page(vmaddr);
-
-	/* put the old current process back */
-	mtsp(space, 3);
-	mtctl(pgd, 25);
-	preempt_enable();
-}
-
-
-static inline void
-__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-	if (likely(vma->vm_mm->context == mfsp(3))) {
-		flush_user_dcache_page(vmaddr);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_page(vmaddr);
-	} else {
-		flush_user_cache_page_non_current(vma, vmaddr);
-	}
+	flush_dcache_page_asm(physaddr, vmaddr);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_page_asm(physaddr, vmaddr);
 }

 void flush_dcache_page(struct page *page)
···
 	struct vm_area_struct *mpnt;
 	struct prio_tree_iter iter;
 	unsigned long offset;
-	unsigned long addr;
+	unsigned long addr, old_addr = 0;
 	pgoff_t pgoff;
-	unsigned long pfn = page_to_pfn(page);
-

 	if (mapping && !mapping_mapped(mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
···
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;

-		/* Flush instructions produce non access tlb misses.
-		 * On PA, we nullify these instructions rather than
-		 * taking a page fault if the pte doesn't exist.
-		 * This is just for speed.  If the page translation
-		 * isn't there, there's no point exciting the
-		 * nadtlb handler into a nullification frenzy.
-		 *
-		 * Make sure we really have this page: the private
-		 * mappings may cover this area but have COW'd this
-		 * particular page.
-		 */
-		if (translation_exists(mpnt, addr, pfn)) {
-			__flush_cache_page(mpnt, addr);
-			break;
+		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+			__flush_cache_page(mpnt, addr, page_to_phys(page));
+			if (old_addr)
+				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+			old_addr = addr;
 		}
 	}
 	flush_dcache_mmap_unlock(mapping);
···
 {
 	BUG_ON(!vma->vm_mm->context);

-	if (likely(translation_exists(vma, vmaddr, pfn)))
-		__flush_cache_page(vma, vmaddr);
+	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));

 }
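
The rewritten loop in flush_dcache_page() above relies on the PA-RISC caches being virtually indexed: two user mappings of the same physical page land on the same cache lines exactly when their virtual addresses are congruent modulo SHMLBA, so only one address per congruence class needs flushing, and a second class is a genuine inequivalent alias worth the printk. A small self-contained model of that check, assuming the 4 MB SHMLBA that parisc used at the time of this merge:

#include <stdbool.h>
#include <stdio.h>

#define SHMLBA 0x00400000UL	/* parisc shared-mapping alignment: 4 MB */

/* Mirrors the test added in flush_dcache_page(): addresses in the same
 * SHMLBA congruence class share cache lines, so one flush covers both. */
static bool equivalent_alias(unsigned long a, unsigned long b)
{
	return (a & (SHMLBA - 1)) == (b & (SHMLBA - 1));
}

int main(void)
{
	/* same offset within the 4 MB colour: one flush suffices */
	printf("%d\n", equivalent_alias(0x10400000UL, 0x20400000UL)); /* 1 */
	/* different colour: an inequivalent alias, flushed separately */
	printf("%d\n", equivalent_alias(0x10400000UL, 0x20401000UL)); /* 0 */
	return 0;
}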
+126 -91
arch/parisc/kernel/entry.S
···
 #ifndef CONFIG_64BIT
 	/*
 	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
-	 *
-	 * Note: naitlb misses will be treated
-	 *       as an ordinary itlb miss for now.
-	 *       However, note that naitlb misses
-	 *       have the faulting address in the
-	 *       IOR/ISR.
 	 */

 	.macro	naitlb_11 code

 	mfctl	%isr,spc
-	b	itlb_miss_11
+	b	naitlb_miss_11
 	mfctl	%ior,va
-	/* FIXME: If user causes a naitlb miss, the priv level may not be in
-	 * lower bits of va, where the itlb miss handler is expecting them
-	 */

 	.align		32
 	.endm
···
 	/*
 	 * naitlb miss interruption handler (parisc 2.0)
-	 *
-	 * Note: naitlb misses will be treated
-	 *       as an ordinary itlb miss for now.
-	 *       However, note that naitlb misses
-	 *       have the faulting address in the
-	 *       IOR/ISR.
 	 */

 	.macro	naitlb_20 code

 	mfctl	%isr,spc
 #ifdef CONFIG_64BIT
-	b	itlb_miss_20w
+	b	naitlb_miss_20w
 #else
-	b	itlb_miss_20
+	b	naitlb_miss_20
 #endif
 	mfctl	%ior,va
-	/* FIXME: If user causes a naitlb miss, the priv level may not be in
-	 * lower bits of va, where the itlb miss handler is expecting them
-	 */

 	.align		32
 	.endm
···
 	copy		\va,\tmp1
 	depi		0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
-	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
+	mfctl		%cr19,\tmp	/* iir */
+	/* get the opcode (first six bits) into \tmp */
+	extrw,u		\tmp,5,6,\tmp
+	/*
+	 * Only setting the T bit prevents data cache movein
+	 * Setting access rights to zero prevents instruction cache movein
+	 *
+	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
+	 * to type field and _PAGE_READ goes to top bit of PL1
+	 */
+	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
+	/*
+	 * so if the opcode is one (i.e. this is a memory management
+	 * instruction) nullify the next load so \prot is only T.
+	 * Otherwise this is a normal data operation
+	 */
+	cmpiclr,=	0x01,\tmp,%r0
+	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
 	depd,z		\prot,8,7,\prot
 	/*
 	 * OK, it is in the temp alias region, check whether "from" or "to".
···
 	def		13
 	def		14
 	dtlb_20		15
-#if 0
 	naitlb_20	16
-#else
-	def		16
-#endif
 	nadtlb_20	17
 	def		18
 	def		19
···
 	def		13
 	def		14
 	dtlb_11		15
-#if 0
 	naitlb_11	16
-#else
-	def		16
-#endif
 	nadtlb_11	17
 	def		18
 	def		19
···
 	get_pgd		spc,ptp
 	space_check	spc,t0,nadtlb_fault

-	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w
+	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

 	update_ptep	ptp,pte,t0,t1
···
 	rfir
 	nop

-nadtlb_check_flush_20w:
-	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
-	/* Insert a "flush only" translation */
-
-	depdi,z		7,7,3,prot
-	depdi		1,10,1,prot
-
-	/* Drop prot bits from pte and convert to page addr for idtlbt */
-	convert_for_tlb_insert20 pte
+nadtlb_check_alias_20w:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

 	idtlbt		pte,prot
···
 	nop

 dtlb_check_alias_11:
-
-	/* Check to see if fault is in the temporary alias region */
-
-	cmpib,<>,n	0,spc,dtlb_fault /* forward */
-	ldil		L%(TMPALIAS_MAP_START),t0
-	copy		va,t1
-	depwi		0,31,23,t1
-	cmpb,<>,n	t0,t1,dtlb_fault /* forward */
-	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
-	depw,z		prot,8,7,prot
-
-	/*
-	 * OK, it is in the temp alias region, check whether "from" or "to".
-	 * Check "subtle" note in pacache.S re: r23/r26.
-	 */
-
-	extrw,u,=	va,9,1,r0
-	or,tr		%r23,%r0,pte	/* If "from" use "from" page */
-	or		%r26,%r0,pte	/* else "to", use "to" page  */
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

 	idtlba		pte,(va)
 	idtlbp		prot,(va)
···
 	space_check	spc,t0,nadtlb_fault

-	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11
+	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

 	update_ptep	ptp,pte,t0,t1
···
 	rfir
 	nop

-nadtlb_check_flush_11:
-	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate
+nadtlb_check_alias_11:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

-	/* Insert a "flush only" translation */
-
-	zdepi		7,7,3,prot
-	depi		1,10,1,prot
-
-	/* Get rid of prot bits and convert to page addr for idtlba */
-
-	depi		0,31,ASM_PFN_PTE_SHIFT,pte
-	SHRREG		pte,(ASM_PFN_PTE_SHIFT-(31-26)),pte
-
-	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
-	mtsp		spc,%sr1
-
-	idtlba		pte,(%sr1,va)
-	idtlbp		prot,(%sr1,va)
-
-	mtsp		t0, %sr1	/* Restore sr1 */
+	idtlba		pte,(va)
+	idtlbp		prot,(va)

 	rfir
 	nop
···
 	space_check	spc,t0,nadtlb_fault

-	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20
+	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

 	update_ptep	ptp,pte,t0,t1
···
 	rfir
 	nop

-nadtlb_check_flush_20:
-	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
-	/* Insert a "flush only" translation */
-
-	depdi,z		7,7,3,prot
-	depdi		1,10,1,prot
-
-	/* Drop prot bits from pte and convert to page addr for idtlbt */
-	convert_for_tlb_insert20 pte
+nadtlb_check_alias_20:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

 	idtlbt		pte,prot

 	rfir
 	nop
+
 #endif

 nadtlb_emulate:
···
 	rfir
 	nop

+naitlb_miss_20w:
+
+	/*
+	 * I miss is a little different, since we allow users to fault
+	 * on the gateway page which is in the kernel address space.
+	 */
+
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,naitlb_fault
+
+	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
+
+	update_ptep	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
+naitlb_check_alias_20w:
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
 #else

 itlb_miss_11:
···
 	rfir
 	nop

+naitlb_miss_11:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,naitlb_fault
+
+	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
+
+	update_ptep	ptp,pte,t0,t1
+
+	make_insert_tlb_11	spc,pte,prot
+
+	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+	mtsp		spc,%sr1
+
+	iitlba		pte,(%sr1,va)
+	iitlbp		prot,(%sr1,va)
+
+	mtsp		t0, %sr1	/* Restore sr1 */
+
+	rfir
+	nop
+
+naitlb_check_alias_11:
+	do_alias	spc,t0,t1,va,pte,prot,itlb_fault
+
+	iitlba		pte,(%sr0, va)
+	iitlbp		prot,(%sr0, va)
+
+	rfir
+	nop
+
+
 itlb_miss_20:
 	get_pgd		spc,ptp
···
 	make_insert_tlb	spc,pte,prot

 	f_extend	pte,t0
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
+naitlb_miss_20:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,naitlb_fault
+
+	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
+
+	update_ptep	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot
+
+	f_extend	pte,t0
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
+naitlb_check_alias_20:
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault

 	iitlbt		pte,prot
···
 nadtlb_fault:
 	b		intr_save
 	ldi		17,%r8
+
+naitlb_fault:
+	b		intr_save
+	ldi		16,%r8

 dtlb_fault:
 	b		intr_save
+37 -27
arch/parisc/kernel/irq.c
···
  */
 static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

-static void cpu_mask_irq(unsigned int irq)
+static void cpu_mask_irq(struct irq_data *d)
 {
-	unsigned long eirr_bit = EIEM_MASK(irq);
+	unsigned long eirr_bit = EIEM_MASK(d->irq);

 	cpu_eiem &= ~eirr_bit;
 	/* Do nothing on the other CPUs.  If they get this interrupt,
···
 	 * then gets disabled */
 }

-static void cpu_unmask_irq(unsigned int irq)
+static void __cpu_unmask_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
···
 	smp_send_all_nop();
 }

-void cpu_ack_irq(unsigned int irq)
+static void cpu_unmask_irq(struct irq_data *d)
+{
+	__cpu_unmask_irq(d->irq);
+}
+
+void cpu_ack_irq(struct irq_data *d)
 {
-	unsigned long mask = EIEM_MASK(irq);
+	unsigned long mask = EIEM_MASK(d->irq);
 	int cpu = smp_processor_id();

 	/* Clear in EIEM so we can no longer process */
···
 	mtctl(mask, 23);
 }

-void cpu_eoi_irq(unsigned int irq)
+void cpu_eoi_irq(struct irq_data *d)
 {
-	unsigned long mask = EIEM_MASK(irq);
+	unsigned long mask = EIEM_MASK(d->irq);
 	int cpu = smp_processor_id();

 	/* set it in the eiems---it's no longer in process */
···
 }

 #ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
 {
 	int cpu_dest;

 	/* timer and ipi have to always be received on all CPUs */
-	if (CHECK_IRQ_PER_CPU(irq)) {
+	if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) {
 		/* Bad linux design decision.  The mask has already
-		 * been set; we must reset it */
-		cpumask_setall(irq_desc[irq].affinity);
+		 * been set; we must reset it. Will fix - tglx
+		 */
+		cpumask_setall(d->affinity);
 		return -EINVAL;
 	}
···
 	return cpu_dest;
 }

-static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+				bool force)
 {
 	int cpu_dest;

-	cpu_dest = cpu_check_affinity(irq, dest);
+	cpu_dest = cpu_check_affinity(d, dest);
 	if (cpu_dest < 0)
 		return -1;

-	cpumask_copy(irq_desc[irq].affinity, dest);
+	cpumask_copy(d->affinity, dest);

 	return 0;
 }
 #endif

 static struct irq_chip cpu_interrupt_type = {
-	.name		= "CPU",
-	.mask		= cpu_mask_irq,
-	.unmask		= cpu_unmask_irq,
-	.ack		= cpu_ack_irq,
-	.eoi		= cpu_eoi_irq,
+	.name			= "CPU",
+	.irq_mask		= cpu_mask_irq,
+	.irq_unmask		= cpu_unmask_irq,
+	.irq_ack		= cpu_ack_irq,
+	.irq_eoi		= cpu_eoi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity	= cpu_set_affinity_irq,
+	.irq_set_affinity	= cpu_set_affinity_irq,
 #endif
 	/* XXX: Needs to be written.  We managed without it so far, but
 	 * we really ought to write it.
 	 */
-	.retrigger	= NULL,
+	.irq_retrigger	= NULL,
 };

 int show_interrupts(struct seq_file *p, void *v)
···
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #endif

-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
 #ifndef PARISC_IRQ_CR16_COUNTS
 		seq_printf(p, "  %s", action->name);
···
 {
 	if (irq_desc[irq].action)
 		return -EBUSY;
-	if (irq_desc[irq].chip != &cpu_interrupt_type)
+	if (get_irq_chip(irq) != &cpu_interrupt_type)
 		return -EBUSY;

 	/* for iosapic interrupts */
 	if (type) {
 		set_irq_chip_and_handler(irq, type, handle_percpu_irq);
 		set_irq_chip_data(irq, data);
-		cpu_unmask_irq(irq);
+		__cpu_unmask_irq(irq);
 	}
 	return 0;
 }
···
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+	struct irq_data *d = irq_get_irq_data(irq);
+	cpumask_copy(d->affinity, cpumask_of(cpu));
 #endif

 	return per_cpu(cpu_data, cpu).txn_addr;
···
 	unsigned long eirr_val;
 	int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
+	struct irq_desc *desc;
 	cpumask_t dest;
 #endif
···
 	irq = eirr_to_irq(eirr_val);

 #ifdef CONFIG_SMP
-	cpumask_copy(&dest, irq_desc[irq].affinity);
-	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+	desc = irq_to_desc(irq);
+	cpumask_copy(&dest, desc->irq_data.affinity);
+	if (CHECK_IRQ_PER_CPU(desc->status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
+124 -151
arch/parisc/kernel/pacache.S
···
 	.procend
 ENDPROC(__clear_user_page_asm)

+ENTRY(flush_dcache_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+	/* FIXME: page size dependend */
+#endif
+	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,12, %r28		/* Clear any offset bits */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,12, %r28		/* Clear any offset bits */
+#endif
+
+	/* Purge any old translation */
+
+	pdtlb		0(%r28)
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r1
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r28, %r25, %r25
+	sub		%r25, %r1, %r25
+
+
+1:	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	cmpb,COND(<<)	%r28, %r25,1b
+	fdc,m		%r1(%r28)
+
+	sync
+	bv		%r0(%r2)
+	pdtlb		(%r25)
+	.exit
+
+	.procend
+ENDPROC(flush_dcache_page_asm)
+
+ENTRY(flush_icache_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+	/* FIXME: page size dependend */
+#endif
+	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,12, %r28		/* Clear any offset bits */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,12, %r28		/* Clear any offset bits */
+#endif
+
+	/* Purge any old translation */
+
+	pitlb		(%sr0,%r28)
+
+	ldil		L%icache_stride, %r1
+	ldw		R%icache_stride(%r1), %r1
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r28, %r25, %r25
+	sub		%r25, %r1, %r25
+
+
+1:	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	cmpb,COND(<<)	%r28, %r25,1b
+	fic,m		%r1(%r28)
+
+	sync
+	bv		%r0(%r2)
+	pitlb		(%sr0,%r25)
+	.exit
+
+	.procend
+ENDPROC(flush_icache_page_asm)
+
 ENTRY(flush_kernel_dcache_page_asm)
 	.proc
 	.callinfo NO_CALLS
···
 	.procend
 ENDPROC(flush_kernel_dcache_page_asm)
-
-ENTRY(flush_user_dcache_page)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
-	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r23
-
-#ifdef CONFIG_64BIT
-	depdi,z		1,63-PAGE_SHIFT,1, %r25
-#else
-	depwi,z		1,31-PAGE_SHIFT,1, %r25
-#endif
-	add		%r26, %r25, %r25
-	sub		%r25, %r23, %r25
-
-
-1:	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	cmpb,COND(<<)	%r26, %r25,1b
-	fdc,m		%r23(%sr3, %r26)
-
-	sync
-	bv		%r0(%r2)
-	nop
-	.exit
-
-	.procend
-ENDPROC(flush_user_dcache_page)
-
-ENTRY(flush_user_icache_page)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
-	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r23
-
-#ifdef CONFIG_64BIT
-	depdi,z		1, 63-PAGE_SHIFT,1, %r25
-#else
-	depwi,z		1, 31-PAGE_SHIFT,1, %r25
-#endif
-	add		%r26, %r25, %r25
-	sub		%r25, %r23, %r25
-
-
-1:	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	cmpb,COND(<<)	%r26, %r25,1b
-	fic,m		%r23(%sr3, %r26)
-
-	sync
-	bv		%r0(%r2)
-	nop
-	.exit
-
-	.procend
-ENDPROC(flush_user_icache_page)
-

 ENTRY(purge_kernel_dcache_page)
 	.proc
···
 	.procend
 ENDPROC(purge_kernel_dcache_page)

-#if 0
-	/* Currently not used, but it still is a possible alternate
-	 * solution.
-	 */
-
-ENTRY(flush_alias_page)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
-	tophys_r1	%r26
-
-	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
-	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,12, %r28		/* Clear any offset bits */
-#else
-	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
-	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,12, %r28		/* Clear any offset bits */
-#endif
-
-	/* Purge any old translation */
-
-	pdtlb		0(%r28)
-
-	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r23
-
-#ifdef CONFIG_64BIT
-	depdi,z		1, 63-PAGE_SHIFT,1, %r29
-#else
-	depwi,z		1, 31-PAGE_SHIFT,1, %r29
-#endif
-	add		%r28, %r29, %r29
-	sub		%r29, %r23, %r29
-
-1:	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	cmpb,COND(<<)	%r28, %r29, 1b
-	fdc,m		%r23(%r28)
-
-	sync
-	bv		%r0(%r2)
-	nop
-	.exit
-
-	.procend
-#endif

 	.export flush_user_dcache_range_asm
···
 	.exit

 	.procend
-ENDPROC(flush_alias_page)

 ENTRY(flush_kernel_dcache_range_asm)
 	.proc
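
A C model of how the new flush_dcache_page_asm forms its flushing address from the depd/depwi sequence above: the low 22 bits of the user virtual address are deposited into TMPALIAS_MAP_START so the alias lands on the same cache colour, then the page-offset bits are cleared. This assumes a 4 MB-aligned base (so the OR behaves like the deposit); the constant below is a hypothetical 32-bit example, the real TMPALIAS_MAP_START comes from the parisc headers:

#include <stdio.h>

#define TMPALIAS_MAP_START 0xf4000000UL	/* hypothetical example value */
#define PAGE_SHIFT 12

/* Mirrors: depw %r25,31,22,%r28 (keep low 22 bits = cache colour)
 * then     depwi 0,31,12,%r28   (clear the page offset). */
static unsigned long tmpalias_addr(unsigned long user_vaddr)
{
	unsigned long colour = user_vaddr & ((1UL << 22) - 1);

	return (TMPALIAS_MAP_START | colour) & ~((1UL << PAGE_SHIFT) - 1);
}

int main(void)
{
	printf("%#lx\n", tmpalias_addr(0x4065a123UL));	/* -> 0xf425a000 */
	return 0;
}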
+11 -11
drivers/parisc/dino.c
···
 	.outl = dino_out32
 };

-static void dino_mask_irq(unsigned int irq)
+static void dino_mask_irq(struct irq_data *d)
 {
-	struct dino_device *dino_dev = get_irq_chip_data(irq);
-	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
+	struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+	int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);

-	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
+	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);

 	/* Clear the matching bit in the IMR register */
 	dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
 	__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
 }

-static void dino_unmask_irq(unsigned int irq)
+static void dino_unmask_irq(struct irq_data *d)
 {
-	struct dino_device *dino_dev = get_irq_chip_data(irq);
-	int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
+	struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+	int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
 	u32 tmp;

-	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
+	DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);

 	/*
 	** clear pending IRQ bits
···
 }

 static struct irq_chip dino_interrupt_type = {
-	.name	= "GSC-PCI",
-	.unmask	= dino_unmask_irq,
-	.mask	= dino_mask_irq,
+	.name		= "GSC-PCI",
+	.irq_unmask	= dino_unmask_irq,
+	.irq_mask	= dino_mask_irq,
 };

+7 -5
drivers/parisc/eisa.c
···

 /* called by free irq */
-static void eisa_mask_irq(unsigned int irq)
+static void eisa_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long flags;

 	EISA_DBG("disable irq %d\n", irq);
···
 }

 /* called by request irq */
-static void eisa_unmask_irq(unsigned int irq)
+static void eisa_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long flags;
 	EISA_DBG("enable irq %d\n", irq);
···
 }

 static struct irq_chip eisa_interrupt_type = {
-	.name	= "EISA",
-	.unmask	= eisa_unmask_irq,
-	.mask	= eisa_mask_irq,
+	.name		= "EISA",
+	.irq_unmask	= eisa_unmask_irq,
+	.irq_mask	= eisa_mask_irq,
 };

 static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
+11 -11
drivers/parisc/gsc.c
···
 	return NO_IRQ;
 }

-static void gsc_asic_mask_irq(unsigned int irq)
+static void gsc_asic_mask_irq(struct irq_data *d)
 {
-	struct gsc_asic *irq_dev = get_irq_chip_data(irq);
-	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
+	struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
+	int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
 	u32 imr;

-	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
+	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
 			irq_dev->name, imr);

 	/* Disable the IRQ line by clearing the bit in the IMR */
···
 	gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
 }

-static void gsc_asic_unmask_irq(unsigned int irq)
+static void gsc_asic_unmask_irq(struct irq_data *d)
 {
-	struct gsc_asic *irq_dev = get_irq_chip_data(irq);
-	int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
+	struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
+	int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
 	u32 imr;

-	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
+	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
 			irq_dev->name, imr);

 	/* Enable the IRQ line by setting the bit in the IMR */
···
 }

 static struct irq_chip gsc_asic_interrupt_type = {
-	.name	= "GSC-ASIC",
-	.unmask	= gsc_asic_unmask_irq,
-	.mask	= gsc_asic_mask_irq,
+	.name		= "GSC-ASIC",
+	.irq_unmask	= gsc_asic_unmask_irq,
+	.irq_mask	= gsc_asic_mask_irq,
 };

 int gsc_assign_irq(struct irq_chip *type, void *data)
+20 -20
drivers/parisc/iosapic.c
···
 }


-static void iosapic_mask_irq(unsigned int irq)
+static void iosapic_mask_irq(struct irq_data *d)
 {
 	unsigned long flags;
-	struct vector_info *vi = get_irq_chip_data(irq);
+	struct vector_info *vi = irq_data_get_irq_chip_data(d);
 	u32 d0, d1;

 	spin_lock_irqsave(&iosapic_lock, flags);
···
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 }

-static void iosapic_unmask_irq(unsigned int irq)
+static void iosapic_unmask_irq(struct irq_data *d)
 {
-	struct vector_info *vi = get_irq_chip_data(irq);
+	struct vector_info *vi = irq_data_get_irq_chip_data(d);
 	u32 d0, d1;

 	/* data is initialized by fixup_irq */
···
 	 * enables their IRQ. It can lead to "interesting" race conditions
 	 * in the driver initialization sequence.
 	 */
-	DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
+	DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", d->irq,
 			vi->eoi_addr, vi->eoi_data);
 	iosapic_eoi(vi->eoi_addr, vi->eoi_data);
 }

-static void iosapic_eoi_irq(unsigned int irq)
+static void iosapic_eoi_irq(struct irq_data *d)
 {
-	struct vector_info *vi = get_irq_chip_data(irq);
+	struct vector_info *vi = irq_data_get_irq_chip_data(d);

 	iosapic_eoi(vi->eoi_addr, vi->eoi_data);
-	cpu_eoi_irq(irq);
+	cpu_eoi_irq(d);
 }

 #ifdef CONFIG_SMP
-static int iosapic_set_affinity_irq(unsigned int irq,
-				     const struct cpumask *dest)
+static int iosapic_set_affinity_irq(struct irq_data *d,
+				    const struct cpumask *dest, bool force)
 {
-	struct vector_info *vi = get_irq_chip_data(irq);
+	struct vector_info *vi = irq_data_get_irq_chip_data(d);
 	u32 d0, d1, dummy_d0;
 	unsigned long flags;
 	int dest_cpu;

-	dest_cpu = cpu_check_affinity(irq, dest);
+	dest_cpu = cpu_check_affinity(d, dest);
 	if (dest_cpu < 0)
 		return -1;

-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
-	vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
+	cpumask_copy(d->affinity, cpumask_of(dest_cpu));
+	vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);

 	spin_lock_irqsave(&iosapic_lock, flags);
 	/* d1 contains the destination CPU, so only want to set that
···
 #endif

 static struct irq_chip iosapic_interrupt_type = {
-	.name	 =	"IO-SAPIC-level",
-	.unmask	 =	iosapic_unmask_irq,
-	.mask	 =	iosapic_mask_irq,
-	.ack	 =	cpu_ack_irq,
-	.eoi	 =	iosapic_eoi_irq,
+	.name		=	"IO-SAPIC-level",
+	.irq_unmask	=	iosapic_unmask_irq,
+	.irq_mask	=	iosapic_mask_irq,
+	.irq_ack	=	cpu_ack_irq,
+	.irq_eoi	=	iosapic_eoi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity =	iosapic_set_affinity_irq,
+	.irq_set_affinity =	iosapic_set_affinity_irq,
 #endif
 };

+7 -5
drivers/parisc/superio.c
···
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);

-static void superio_mask_irq(unsigned int irq)
+static void superio_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	u8 r8;

 	if ((irq < 1) || (irq == 2) || (irq > 7)) {
···
 	outb (r8,IC_PIC1+1);
 }

-static void superio_unmask_irq(unsigned int irq)
+static void superio_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	u8 r8;

 	if ((irq < 1) || (irq == 2) || (irq > 7)) {
···
 }

 static struct irq_chip superio_interrupt_type = {
-	.name	 =	SUPERIO,
-	.unmask	 =	superio_unmask_irq,
-	.mask	 =	superio_mask_irq,
+	.name		=	SUPERIO,
+	.irq_unmask	=	superio_unmask_irq,
+	.irq_mask	=	superio_mask_irq,
 };

 #ifdef DEBUG_SUPERIO_INIT