Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6:
  [PARISC] Convert to new irq_chip functions
  [PARISC] fix per-cpu flag problem in the cpu affinity checkers
  [PARISC] fix vmap flush/invalidate
  eliminate special FLUSH flag from page table
  parisc: flush pages through tmpalias space
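The last two commits are the interesting ones: instead of tagging a cleared PTE with a software FLUSH bit so the fault handlers could build a "flush only" translation, user pages are now flushed through a fixed kernel alias window (the tmpalias space) congruent to the user mapping. A minimal C sketch of the address arithmetic, assuming 4 KB pages, parisc's 4 MB SHMLBA, and an illustrative TMPALIAS_MAP_START value; the real work is done in assembly in pacache.S below:

#include <stdio.h>

#define PAGE_SIZE          4096UL
#define SHMLBA             (4UL * 1024 * 1024)  /* parisc cache-colour granule */
#define TMPALIAS_MAP_START 0xf5000000UL         /* illustrative value only */

/*
 * Form the kernel alias address used to flush the physical page mapped
 * at user address 'uvaddr': keep the colour bits (uvaddr modulo SHMLBA,
 * page aligned) so the alias hits the same virtually-indexed cache sets
 * as the user mapping.  Mirrors the depd/depdi sequence in
 * flush_dcache_page_asm.
 */
static unsigned long tmpalias_addr(unsigned long uvaddr)
{
        return TMPALIAS_MAP_START | (uvaddr & (SHMLBA - 1) & ~(PAGE_SIZE - 1));
}

int main(void)
{
        printf("alias for 0x12345678 -> 0x%lx\n", tmpalias_addr(0x12345678UL));
        return 0;
}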

+390 -442
+1 -0
arch/parisc/Kconfig
··· 15 15 select HAVE_GENERIC_HARDIRQS 16 16 select GENERIC_IRQ_PROBE 17 17 select IRQ_PER_CPU 18 + select GENERIC_HARDIRQS_NO_DEPRECATED 18 19 19 20 help 20 21 The PA-RISC microprocessor is designed by Hewlett-Packard and used
+21 -10
arch/parisc/include/asm/cacheflush.h
··· 26 26 void flush_kernel_dcache_range_asm(unsigned long, unsigned long); 27 27 void flush_kernel_dcache_page_asm(void *); 28 28 void flush_kernel_icache_page(void *); 29 - void flush_user_dcache_page(unsigned long); 30 - void flush_user_icache_page(unsigned long); 31 29 void flush_user_dcache_range(unsigned long, unsigned long); 32 30 void flush_user_icache_range(unsigned long, unsigned long); 33 31 ··· 34 36 void flush_cache_all_local(void); 35 37 void flush_cache_all(void); 36 38 void flush_cache_mm(struct mm_struct *mm); 39 + 40 + #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 41 + void flush_kernel_dcache_page_addr(void *addr); 42 + static inline void flush_kernel_dcache_page(struct page *page) 43 + { 44 + flush_kernel_dcache_page_addr(page_address(page)); 45 + } 37 46 38 47 #define flush_kernel_dcache_range(start,size) \ 39 48 flush_kernel_dcache_range_asm((start), (start)+(size)); ··· 55 50 } 56 51 static inline void invalidate_kernel_vmap_range(void *vaddr, int size) 57 52 { 53 + unsigned long start = (unsigned long)vaddr; 54 + void *cursor = vaddr; 55 + 56 + for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { 57 + struct page *page = vmalloc_to_page(cursor); 58 + 59 + if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) 60 + flush_kernel_dcache_page(page); 61 + } 62 + flush_kernel_dcache_range_asm(start, start + size); 58 63 } 59 64 60 65 #define flush_cache_vmap(start, end) flush_cache_all() ··· 105 90 void flush_cache_range(struct vm_area_struct *vma, 106 91 unsigned long start, unsigned long end); 107 92 93 + /* defined in pacache.S exported in cache.c used by flush_anon_page */ 94 + void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); 95 + 108 96 #define ARCH_HAS_FLUSH_ANON_PAGE 109 97 static inline void 110 98 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) 111 99 { 112 100 if (PageAnon(page)) 113 - flush_user_dcache_page(vmaddr); 114 - } 115 - 116 - #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 117 - void flush_kernel_dcache_page_addr(void *addr); 118 - static inline void flush_kernel_dcache_page(struct page *page) 119 - { 120 - flush_kernel_dcache_page_addr(page_address(page)); 101 + flush_dcache_page_asm(page_to_phys(page), vmaddr); 121 102 } 122 103 123 104 #ifdef CONFIG_DEBUG_RODATA
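For context, flush_kernel_vmap_range()/invalidate_kernel_vmap_range() bracket DMA to a vmalloc()/vmap() area: flush before the device reads the buffer, invalidate before the CPU reads what the device wrote. That second step is why the parisc implementation above must also chase down pages left dirty via PG_dcache_dirty. A hedged sketch of a caller; the DMA calls are placeholders, not a real driver API:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

/* Sketch only: start_device_dma()/wait_device_dma() stand in for a
 * real driver's DMA machinery and do not exist in the kernel tree. */
static int demo_dma_roundtrip(size_t len)
{
        void *buf = vmalloc(len);

        if (!buf)
                return -ENOMEM;

        /* The CPU filled the buffer through its vmap alias: write the
         * dirty lines back before the device reads memory. */
        flush_kernel_vmap_range(buf, len);

        /* start_device_dma(buf, len); wait_device_dma(); */

        /* The device wrote memory: drop stale cached lines before the
         * CPU reads through the vmap alias again. */
        invalidate_kernel_vmap_range(buf, len);

        vfree(buf);
        return 0;
}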
+4 -9
arch/parisc/include/asm/irq.h
··· 32 32 } 33 33 34 34 struct irq_chip; 35 + struct irq_data; 35 36 36 - /* 37 - * Some useful "we don't have to do anything here" handlers. Should 38 - * probably be provided by the generic code. 39 - */ 40 - void no_ack_irq(unsigned int irq); 41 - void no_end_irq(unsigned int irq); 42 - void cpu_ack_irq(unsigned int irq); 43 - void cpu_eoi_irq(unsigned int irq); 37 + void cpu_ack_irq(struct irq_data *d); 38 + void cpu_eoi_irq(struct irq_data *d); 44 39 45 40 extern int txn_alloc_irq(unsigned int nbits); 46 41 extern int txn_claim_irq(int); ··· 44 49 extern unsigned long txn_affinity_addr(unsigned int irq, int cpu); 45 50 46 51 extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *); 47 - extern int cpu_check_affinity(unsigned int irq, const struct cpumask *dest); 52 + extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest); 48 53 49 54 /* soft power switch support (power.c) */ 50 55 extern struct tasklet_struct power_tasklet;
+4 -10
arch/parisc/include/asm/pgtable.h
··· 138 138 #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ 139 139 #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ 140 140 #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ 141 - #define _PAGE_FLUSH_BIT 21 /* (0x400) Software: translation valid */ 142 - /* for cache flushing only */ 141 + /* bit 21 was formerly the FLUSH bit but is now unused */ 143 142 #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ 144 143 145 144 /* N.B. The bits are defined in terms of a 32 bit word above, so the */ ··· 172 173 #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) 173 174 #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) 174 175 #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) 175 - #define _PAGE_FLUSH (1 << xlate_pabit(_PAGE_FLUSH_BIT)) 176 176 #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) 177 177 #define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT)) 178 178 ··· 211 213 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) 212 214 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) 213 215 #define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ) 214 - #define PAGE_FLUSH __pgprot(_PAGE_FLUSH) 215 216 216 217 217 218 /* ··· 258 261 259 262 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 260 263 261 - #define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH)) 264 + #define pte_none(x) (pte_val(x) == 0) 262 265 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 263 266 #define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) 264 267 ··· 441 444 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 442 445 { 443 446 pte_t old_pte; 444 - pte_t pte; 445 447 446 448 spin_lock(&pa_dbit_lock); 447 - pte = old_pte = *ptep; 448 - pte_val(pte) &= ~_PAGE_PRESENT; 449 - pte_val(pte) |= _PAGE_FLUSH; 450 - set_pte_at(mm,addr,ptep,pte); 449 + old_pte = *ptep; 450 + pte_clear(mm,addr,ptep); 451 451 spin_unlock(&pa_dbit_lock); 452 452 453 453 return old_pte;
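A note on the hex values in the comments above: PA numbers bits from the most significant end, so software bit n of the 32-bit PTE word carries the value 1 << (31 - n), which is all xlate_pabit() does in the 32-bit layout. A quick self-check:

#include <assert.h>

/* PA bit numbering: bit 0 is the MSB of the 32-bit word, so bit n
 * carries the value 1 << (31 - n).  Mirrors parisc's xlate_pabit()
 * for the 32-bit PTE layout. */
#define xlate_pabit(n) (31 - (n))

int main(void)
{
        assert((1u << xlate_pabit(22)) == 0x200); /* _PAGE_PRESENT     */
        assert((1u << xlate_pabit(21)) == 0x400); /* retired FLUSH bit */
        assert((1u << xlate_pabit(24)) == 0x080); /* _PAGE_NO_CACHE    */
        return 0;
}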
+17 -92
arch/parisc/kernel/cache.c
··· 27 27 #include <asm/pgalloc.h> 28 28 #include <asm/processor.h> 29 29 #include <asm/sections.h> 30 + #include <asm/shmparam.h> 30 31 31 32 int split_tlb __read_mostly; 32 33 int dcache_stride __read_mostly; 33 34 int icache_stride __read_mostly; 34 35 EXPORT_SYMBOL(dcache_stride); 36 + 37 + void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); 38 + EXPORT_SYMBOL(flush_dcache_page_asm); 39 + void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); 35 40 36 41 37 42 /* On some machines (e.g. ones with the Merced bus), there can be ··· 264 259 panic("SpaceID hashing is still on!\n"); 265 260 } 266 261 267 - /* Simple function to work out if we have an existing address translation 268 - * for a user space vma. */ 269 - static inline int translation_exists(struct vm_area_struct *vma, 270 - unsigned long addr, unsigned long pfn) 271 - { 272 - pgd_t *pgd = pgd_offset(vma->vm_mm, addr); 273 - pmd_t *pmd; 274 - pte_t pte; 275 - 276 - if(pgd_none(*pgd)) 277 - return 0; 278 - 279 - pmd = pmd_offset(pgd, addr); 280 - if(pmd_none(*pmd) || pmd_bad(*pmd)) 281 - return 0; 282 - 283 - /* We cannot take the pte lock here: flush_cache_page is usually 284 - * called with pte lock already held. Whereas flush_dcache_page 285 - * takes flush_dcache_mmap_lock, which is lower in the hierarchy: 286 - * the vma itself is secure, but the pte might come or go racily. 287 - */ 288 - pte = *pte_offset_map(pmd, addr); 289 - /* But pte_unmap() does nothing on this architecture */ 290 - 291 - /* Filter out coincidental file entries and swap entries */ 292 - if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT))) 293 - return 0; 294 - 295 - return pte_pfn(pte) == pfn; 296 - } 297 - 298 - /* Private function to flush a page from the cache of a non-current 299 - * process. cr25 contains the Page Directory of the current user 300 - * process; we're going to hijack both it and the user space %sr3 to 301 - * temporarily make the non-current process current. We have to do 302 - * this because cache flushing may cause a non-access tlb miss which 303 - * the handlers have to fill in from the pgd of the non-current 304 - * process. 
*/ 305 262 static inline void 306 - flush_user_cache_page_non_current(struct vm_area_struct *vma, 307 - unsigned long vmaddr) 263 + __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, 264 + unsigned long physaddr) 308 265 { 309 - /* save the current process space and pgd */ 310 - unsigned long space = mfsp(3), pgd = mfctl(25); 311 - 312 - /* we don't mind taking interrupts since they may not 313 - * do anything with user space, but we can't 314 - * be preempted here */ 315 - preempt_disable(); 316 - 317 - /* make us current */ 318 - mtctl(__pa(vma->vm_mm->pgd), 25); 319 - mtsp(vma->vm_mm->context, 3); 320 - 321 - flush_user_dcache_page(vmaddr); 322 - if(vma->vm_flags & VM_EXEC) 323 - flush_user_icache_page(vmaddr); 324 - 325 - /* put the old current process back */ 326 - mtsp(space, 3); 327 - mtctl(pgd, 25); 328 - preempt_enable(); 329 - } 330 - 331 - 332 - static inline void 333 - __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr) 334 - { 335 - if (likely(vma->vm_mm->context == mfsp(3))) { 336 - flush_user_dcache_page(vmaddr); 337 - if (vma->vm_flags & VM_EXEC) 338 - flush_user_icache_page(vmaddr); 339 - } else { 340 - flush_user_cache_page_non_current(vma, vmaddr); 341 - } 266 + flush_dcache_page_asm(physaddr, vmaddr); 267 + if (vma->vm_flags & VM_EXEC) 268 + flush_icache_page_asm(physaddr, vmaddr); 342 269 } 343 270 344 271 void flush_dcache_page(struct page *page) ··· 279 342 struct vm_area_struct *mpnt; 280 343 struct prio_tree_iter iter; 281 344 unsigned long offset; 282 - unsigned long addr; 345 + unsigned long addr, old_addr = 0; 283 346 pgoff_t pgoff; 284 - unsigned long pfn = page_to_pfn(page); 285 - 286 347 287 348 if (mapping && !mapping_mapped(mapping)) { 288 349 set_bit(PG_dcache_dirty, &page->flags); ··· 304 369 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; 305 370 addr = mpnt->vm_start + offset; 306 371 307 - /* Flush instructions produce non access tlb misses. 308 - * On PA, we nullify these instructions rather than 309 - * taking a page fault if the pte doesn't exist. 310 - * This is just for speed. If the page translation 311 - * isn't there, there's no point exciting the 312 - * nadtlb handler into a nullification frenzy. 313 - * 314 - * Make sure we really have this page: the private 315 - * mappings may cover this area but have COW'd this 316 - * particular page. 317 - */ 318 - if (translation_exists(mpnt, addr, pfn)) { 319 - __flush_cache_page(mpnt, addr); 320 - break; 372 + if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) { 373 + __flush_cache_page(mpnt, addr, page_to_phys(page)); 374 + if (old_addr) 375 + printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)"); 376 + old_addr = addr; 321 377 } 322 378 } 323 379 flush_dcache_mmap_unlock(mapping); ··· 499 573 { 500 574 BUG_ON(!vma->vm_mm->context); 501 575 502 - if (likely(translation_exists(vma, vmaddr, pfn))) 503 - __flush_cache_page(vma, vmaddr); 576 + __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn))); 504 577 505 578 }
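The rewritten flush_dcache_page() above exploits the virtually-indexed-cache aliasing rule: two mappings of a page are cache-equivalent iff their virtual addresses are congruent modulo SHMLBA, so flushing one address per colour suffices, and a second colour is reported with the INEQUIVALENT ALIASES warning. The congruence test, pulled out for clarity:

#include <stdbool.h>
#include <stdio.h>

#define SHMLBA (4UL * 1024 * 1024)  /* parisc shared-mapping alignment */

/* Equivalent aliases land in the same virtually-indexed cache sets. */
static bool equivalent_alias(unsigned long a, unsigned long b)
{
        return (a & (SHMLBA - 1)) == (b & (SHMLBA - 1));
}

int main(void)
{
        printf("%d\n", equivalent_alias(0x00400000, 0x00800000)); /* 1: same colour */
        printf("%d\n", equivalent_alias(0x00400000, 0x00401000)); /* 0: would warn  */
        return 0;
}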
+126 -91
arch/parisc/kernel/entry.S
··· 225 225 #ifndef CONFIG_64BIT 226 226 /* 227 227 * naitlb miss interruption handler (parisc 1.1 - 32 bit) 228 - * 229 - * Note: naitlb misses will be treated 230 - * as an ordinary itlb miss for now. 231 - * However, note that naitlb misses 232 - * have the faulting address in the 233 - * IOR/ISR. 234 228 */ 235 229 236 230 .macro naitlb_11 code 237 231 238 232 mfctl %isr,spc 239 - b itlb_miss_11 233 + b naitlb_miss_11 240 234 mfctl %ior,va 241 - /* FIXME: If user causes a naitlb miss, the priv level may not be in 242 - * lower bits of va, where the itlb miss handler is expecting them 243 - */ 244 235 245 236 .align 32 246 237 .endm ··· 239 248 240 249 /* 241 250 * naitlb miss interruption handler (parisc 2.0) 242 - * 243 - * Note: naitlb misses will be treated 244 - * as an ordinary itlb miss for now. 245 - * However, note that naitlb misses 246 - * have the faulting address in the 247 - * IOR/ISR. 248 251 */ 249 252 250 253 .macro naitlb_20 code 251 254 252 255 mfctl %isr,spc 253 256 #ifdef CONFIG_64BIT 254 - b itlb_miss_20w 257 + b naitlb_miss_20w 255 258 #else 256 - b itlb_miss_20 259 + b naitlb_miss_20 257 260 #endif 258 261 mfctl %ior,va 259 - /* FIXME: If user causes a naitlb miss, the priv level may not be in 260 - * lower bits of va, where the itlb miss handler is expecting them 261 - */ 262 262 263 263 .align 32 264 264 .endm ··· 563 581 copy \va,\tmp1 564 582 depi 0,31,23,\tmp1 565 583 cmpb,COND(<>),n \tmp,\tmp1,\fault 566 - ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot 584 + mfctl %cr19,\tmp /* iir */ 585 + /* get the opcode (first six bits) into \tmp */ 586 + extrw,u \tmp,5,6,\tmp 587 + /* 588 + * Only setting the T bit prevents data cache movein 589 + * Setting access rights to zero prevents instruction cache movein 590 + * 591 + * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go 592 + * to type field and _PAGE_READ goes to top bit of PL1 593 + */ 594 + ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot 595 + /* 596 + * so if the opcode is one (i.e. this is a memory management 597 + * instruction) nullify the next load so \prot is only T. 598 + * Otherwise this is a normal data operation 599 + */ 600 + cmpiclr,= 0x01,\tmp,%r0 601 + ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot 567 602 depd,z \prot,8,7,\prot 568 603 /* 569 604 * OK, it is in the temp alias region, check whether "from" or "to". 
··· 630 631 def 13 631 632 def 14 632 633 dtlb_20 15 633 - #if 0 634 634 naitlb_20 16 635 - #else 636 - def 16 637 - #endif 638 635 nadtlb_20 17 639 636 def 18 640 637 def 19 ··· 673 678 def 13 674 679 def 14 675 680 dtlb_11 15 676 - #if 0 677 681 naitlb_11 16 678 - #else 679 - def 16 680 - #endif 681 682 nadtlb_11 17 682 683 def 18 683 684 def 19 ··· 1194 1203 get_pgd spc,ptp 1195 1204 space_check spc,t0,nadtlb_fault 1196 1205 1197 - L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w 1206 + L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w 1198 1207 1199 1208 update_ptep ptp,pte,t0,t1 1200 1209 ··· 1205 1214 rfir 1206 1215 nop 1207 1216 1208 - nadtlb_check_flush_20w: 1209 - bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1210 - 1211 - /* Insert a "flush only" translation */ 1212 - 1213 - depdi,z 7,7,3,prot 1214 - depdi 1,10,1,prot 1215 - 1216 - /* Drop prot bits from pte and convert to page addr for idtlbt */ 1217 - convert_for_tlb_insert20 pte 1217 + nadtlb_check_alias_20w: 1218 + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate 1218 1219 1219 1220 idtlbt pte,prot 1220 1221 ··· 1238 1255 nop 1239 1256 1240 1257 dtlb_check_alias_11: 1241 - 1242 - /* Check to see if fault is in the temporary alias region */ 1243 - 1244 - cmpib,<>,n 0,spc,dtlb_fault /* forward */ 1245 - ldil L%(TMPALIAS_MAP_START),t0 1246 - copy va,t1 1247 - depwi 0,31,23,t1 1248 - cmpb,<>,n t0,t1,dtlb_fault /* forward */ 1249 - ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot 1250 - depw,z prot,8,7,prot 1251 - 1252 - /* 1253 - * OK, it is in the temp alias region, check whether "from" or "to". 1254 - * Check "subtle" note in pacache.S re: r23/r26. 1255 - */ 1256 - 1257 - extrw,u,= va,9,1,r0 1258 - or,tr %r23,%r0,pte /* If "from" use "from" page */ 1259 - or %r26,%r0,pte /* else "to", use "to" page */ 1258 + do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1260 1259 1261 1260 idtlba pte,(va) 1262 1261 idtlbp prot,(va) ··· 1251 1286 1252 1287 space_check spc,t0,nadtlb_fault 1253 1288 1254 - L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11 1289 + L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 1255 1290 1256 1291 update_ptep ptp,pte,t0,t1 1257 1292 ··· 1269 1304 rfir 1270 1305 nop 1271 1306 1272 - nadtlb_check_flush_11: 1273 - bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1307 + nadtlb_check_alias_11: 1308 + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate 1274 1309 1275 - /* Insert a "flush only" translation */ 1276 - 1277 - zdepi 7,7,3,prot 1278 - depi 1,10,1,prot 1279 - 1280 - /* Get rid of prot bits and convert to page addr for idtlba */ 1281 - 1282 - depi 0,31,ASM_PFN_PTE_SHIFT,pte 1283 - SHRREG pte,(ASM_PFN_PTE_SHIFT-(31-26)),pte 1284 - 1285 - mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1286 - mtsp spc,%sr1 1287 - 1288 - idtlba pte,(%sr1,va) 1289 - idtlbp prot,(%sr1,va) 1290 - 1291 - mtsp t0, %sr1 /* Restore sr1 */ 1310 + idtlba pte,(va) 1311 + idtlbp prot,(va) 1292 1312 1293 1313 rfir 1294 1314 nop ··· 1309 1359 1310 1360 space_check spc,t0,nadtlb_fault 1311 1361 1312 - L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20 1362 + L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 1313 1363 1314 1364 update_ptep ptp,pte,t0,t1 1315 1365 ··· 1322 1372 rfir 1323 1373 nop 1324 1374 1325 - nadtlb_check_flush_20: 1326 - bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1327 - 1328 - /* Insert a "flush only" translation */ 1329 - 1330 - depdi,z 7,7,3,prot 1331 - depdi 1,10,1,prot 1332 - 1333 - /* Drop prot bits from pte and convert to page addr for idtlbt */ 1334 - convert_for_tlb_insert20 pte 1375 + nadtlb_check_alias_20: 1376 + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate 
1335 1377 1336 1378 idtlbt pte,prot 1337 1379 1338 1380 rfir 1339 1381 nop 1382 + 1340 1383 #endif 1341 1384 1342 1385 nadtlb_emulate: ··· 1427 1484 rfir 1428 1485 nop 1429 1486 1487 + naitlb_miss_20w: 1488 + 1489 + /* 1490 + * I miss is a little different, since we allow users to fault 1491 + * on the gateway page which is in the kernel address space. 1492 + */ 1493 + 1494 + space_adjust spc,va,t0 1495 + get_pgd spc,ptp 1496 + space_check spc,t0,naitlb_fault 1497 + 1498 + L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w 1499 + 1500 + update_ptep ptp,pte,t0,t1 1501 + 1502 + make_insert_tlb spc,pte,prot 1503 + 1504 + iitlbt pte,prot 1505 + 1506 + rfir 1507 + nop 1508 + 1509 + naitlb_check_alias_20w: 1510 + do_alias spc,t0,t1,va,pte,prot,naitlb_fault 1511 + 1512 + iitlbt pte,prot 1513 + 1514 + rfir 1515 + nop 1516 + 1430 1517 #else 1431 1518 1432 1519 itlb_miss_11: ··· 1481 1508 rfir 1482 1509 nop 1483 1510 1511 + naitlb_miss_11: 1512 + get_pgd spc,ptp 1513 + 1514 + space_check spc,t0,naitlb_fault 1515 + 1516 + L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 1517 + 1518 + update_ptep ptp,pte,t0,t1 1519 + 1520 + make_insert_tlb_11 spc,pte,prot 1521 + 1522 + mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1523 + mtsp spc,%sr1 1524 + 1525 + iitlba pte,(%sr1,va) 1526 + iitlbp prot,(%sr1,va) 1527 + 1528 + mtsp t0, %sr1 /* Restore sr1 */ 1529 + 1530 + rfir 1531 + nop 1532 + 1533 + naitlb_check_alias_11: 1534 + do_alias spc,t0,t1,va,pte,prot,itlb_fault 1535 + 1536 + iitlba pte,(%sr0, va) 1537 + iitlbp prot,(%sr0, va) 1538 + 1539 + rfir 1540 + nop 1541 + 1542 + 1484 1543 itlb_miss_20: 1485 1544 get_pgd spc,ptp 1486 1545 ··· 1525 1520 make_insert_tlb spc,pte,prot 1526 1521 1527 1522 f_extend pte,t0 1523 + 1524 + iitlbt pte,prot 1525 + 1526 + rfir 1527 + nop 1528 + 1529 + naitlb_miss_20: 1530 + get_pgd spc,ptp 1531 + 1532 + space_check spc,t0,naitlb_fault 1533 + 1534 + L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 1535 + 1536 + update_ptep ptp,pte,t0,t1 1537 + 1538 + make_insert_tlb spc,pte,prot 1539 + 1540 + f_extend pte,t0 1541 + 1542 + iitlbt pte,prot 1543 + 1544 + rfir 1545 + nop 1546 + 1547 + naitlb_check_alias_20: 1548 + do_alias spc,t0,t1,va,pte,prot,naitlb_fault 1528 1549 1529 1550 iitlbt pte,prot 1530 1551 ··· 1692 1661 nadtlb_fault: 1693 1662 b intr_save 1694 1663 ldi 17,%r8 1664 + 1665 + naitlb_fault: 1666 + b intr_save 1667 + ldi 16,%r8 1695 1668 1696 1669 dtlb_fault: 1697 1670 b intr_save
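All of the dtlb/nadtlb/naitlb alias paths above now funnel through the do_alias macro (defined near the temp-alias hunk earlier in this file). Per the fragments shown, its job is: forward to the fault label unless the fault came from kernel space (space id 0) and the address sits in the 8 MB tmpalias window; otherwise hand back the physical "from" or "to" page that the copy/flush routines parked in fixed registers. Roughly, in C (window constant illustrative; the from/to polarity follows the r23/r26 convention noted in pacache.S):

#include <stdbool.h>

#define TMPALIAS_MAP_START 0xf5000000UL  /* illustrative value only */

/*
 * Sketch of the do_alias decision.  The window spans two 4 MB halves,
 * so the region check ignores the low 23 bits and bit 22 selects which
 * of the two pre-loaded physical pages to insert into the TLB.
 */
static bool resolve_tmpalias(unsigned long va, unsigned long from_page,
                             unsigned long to_page, unsigned long *page)
{
        if ((va & ~0x007fffffUL) != TMPALIAS_MAP_START)
                return false;  /* not an alias fault: take the normal path */

        *page = (va & 0x00400000UL) ? from_page : to_page;
        return true;
}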
+37 -27
arch/parisc/kernel/irq.c
··· 52 52 */ 53 53 static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL; 54 54 55 - static void cpu_mask_irq(unsigned int irq) 55 + static void cpu_mask_irq(struct irq_data *d) 56 56 { 57 - unsigned long eirr_bit = EIEM_MASK(irq); 57 + unsigned long eirr_bit = EIEM_MASK(d->irq); 58 58 59 59 cpu_eiem &= ~eirr_bit; 60 60 /* Do nothing on the other CPUs. If they get this interrupt, ··· 63 63 * then gets disabled */ 64 64 } 65 65 66 - static void cpu_unmask_irq(unsigned int irq) 66 + static void __cpu_unmask_irq(unsigned int irq) 67 67 { 68 68 unsigned long eirr_bit = EIEM_MASK(irq); 69 69 ··· 75 75 smp_send_all_nop(); 76 76 } 77 77 78 - void cpu_ack_irq(unsigned int irq) 78 + static void cpu_unmask_irq(struct irq_data *d) 79 79 { 80 - unsigned long mask = EIEM_MASK(irq); 80 + __cpu_unmask_irq(d->irq); 81 + } 82 + 83 + void cpu_ack_irq(struct irq_data *d) 84 + { 85 + unsigned long mask = EIEM_MASK(d->irq); 81 86 int cpu = smp_processor_id(); 82 87 83 88 /* Clear in EIEM so we can no longer process */ ··· 95 90 mtctl(mask, 23); 96 91 } 97 92 98 - void cpu_eoi_irq(unsigned int irq) 93 + void cpu_eoi_irq(struct irq_data *d) 99 94 { 100 - unsigned long mask = EIEM_MASK(irq); 95 + unsigned long mask = EIEM_MASK(d->irq); 101 96 int cpu = smp_processor_id(); 102 97 103 98 /* set it in the eiems---it's no longer in process */ ··· 108 103 } 109 104 110 105 #ifdef CONFIG_SMP 111 - int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) 106 + int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) 112 107 { 113 108 int cpu_dest; 114 109 115 110 /* timer and ipi have to always be received on all CPUs */ 116 - if (CHECK_IRQ_PER_CPU(irq)) { 111 + if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) { 117 112 /* Bad linux design decision. The mask has already 118 - * been set; we must reset it */ 119 - cpumask_setall(irq_desc[irq].affinity); 113 + * been set; we must reset it. Will fix - tglx 114 + */ 115 + cpumask_setall(d->affinity); 120 116 return -EINVAL; 121 117 } 122 118 ··· 127 121 return cpu_dest; 128 122 } 129 123 130 - static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) 124 + static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, 125 + bool force) 131 126 { 132 127 int cpu_dest; 133 128 134 - cpu_dest = cpu_check_affinity(irq, dest); 129 + cpu_dest = cpu_check_affinity(d, dest); 135 130 if (cpu_dest < 0) 136 131 return -1; 137 132 138 - cpumask_copy(irq_desc[irq].affinity, dest); 133 + cpumask_copy(d->affinity, dest); 139 134 140 135 return 0; 141 136 } 142 137 #endif 143 138 144 139 static struct irq_chip cpu_interrupt_type = { 145 - .name = "CPU", 146 - .mask = cpu_mask_irq, 147 - .unmask = cpu_unmask_irq, 148 - .ack = cpu_ack_irq, 149 - .eoi = cpu_eoi_irq, 140 + .name = "CPU", 141 + .irq_mask = cpu_mask_irq, 142 + .irq_unmask = cpu_unmask_irq, 143 + .irq_ack = cpu_ack_irq, 144 + .irq_eoi = cpu_eoi_irq, 150 145 #ifdef CONFIG_SMP 151 - .set_affinity = cpu_set_affinity_irq, 146 + .irq_set_affinity = cpu_set_affinity_irq, 152 147 #endif 153 148 /* XXX: Needs to be written. We managed without it so far, but 154 149 * we really ought to write it. 
155 150 */ 156 - .retrigger = NULL, 151 + .irq_retrigger = NULL, 157 152 }; 158 153 159 154 int show_interrupts(struct seq_file *p, void *v) ··· 188 181 seq_printf(p, "%10u ", kstat_irqs(i)); 189 182 #endif 190 183 191 - seq_printf(p, " %14s", irq_desc[i].chip->name); 184 + seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name); 192 185 #ifndef PARISC_IRQ_CR16_COUNTS 193 186 seq_printf(p, " %s", action->name); 194 187 ··· 240 233 { 241 234 if (irq_desc[irq].action) 242 235 return -EBUSY; 243 - if (irq_desc[irq].chip != &cpu_interrupt_type) 236 + if (get_irq_chip(irq) != &cpu_interrupt_type) 244 237 return -EBUSY; 245 238 246 239 /* for iosapic interrupts */ 247 240 if (type) { 248 241 set_irq_chip_and_handler(irq, type, handle_percpu_irq); 249 242 set_irq_chip_data(irq, data); 250 - cpu_unmask_irq(irq); 243 + __cpu_unmask_irq(irq); 251 244 } 252 245 return 0; 253 246 } ··· 296 289 unsigned long txn_affinity_addr(unsigned int irq, int cpu) 297 290 { 298 291 #ifdef CONFIG_SMP 299 - cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); 292 + struct irq_data *d = irq_get_irq_data(irq); 293 + cpumask_copy(d->affinity, cpumask_of(cpu)); 300 294 #endif 301 295 302 296 return per_cpu(cpu_data, cpu).txn_addr; ··· 341 333 unsigned long eirr_val; 342 334 int irq, cpu = smp_processor_id(); 343 335 #ifdef CONFIG_SMP 336 + struct irq_desc *desc; 344 337 cpumask_t dest; 345 338 #endif 346 339 ··· 355 346 irq = eirr_to_irq(eirr_val); 356 347 357 348 #ifdef CONFIG_SMP 358 - cpumask_copy(&dest, irq_desc[irq].affinity); 359 - if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) && 349 + desc = irq_to_desc(irq); 350 + cpumask_copy(&dest, desc->irq_data.affinity); 351 + if (CHECK_IRQ_PER_CPU(desc->status) && 360 352 !cpu_isset(smp_processor_id(), dest)) { 361 353 int cpu = first_cpu(dest); 362 354
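After this conversion, nothing outside genirq should index irq_desc[] by hand; the accessors used above (irq_get_irq_data(), irq_to_desc(), get_irq_chip()) are the supported routes. A hedged sketch of how the new-style callbacks get driven from an irq number, approximating what the genirq flow handlers do:

#include <linux/irq.h>

/* Sketch: resolve the irq_data once, then call the chip methods with
 * it, as the genirq core does after this conversion. */
static void demo_mask_and_ack(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);

        if (!d || !d->chip)
                return;

        if (d->chip->irq_mask)
                d->chip->irq_mask(d);
        if (d->chip->irq_ack)
                d->chip->irq_ack(d);
}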
+124 -151
arch/parisc/kernel/pacache.S
··· 608 608 .procend 609 609 ENDPROC(__clear_user_page_asm) 610 610 611 + ENTRY(flush_dcache_page_asm) 612 + .proc 613 + .callinfo NO_CALLS 614 + .entry 615 + 616 + ldil L%(TMPALIAS_MAP_START), %r28 617 + #ifdef CONFIG_64BIT 618 + #if (TMPALIAS_MAP_START >= 0x80000000) 619 + depdi 0, 31,32, %r28 /* clear any sign extension */ 620 + /* FIXME: page size dependend */ 621 + #endif 622 + extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 623 + depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 624 + depdi 0, 63,12, %r28 /* Clear any offset bits */ 625 + #else 626 + extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 627 + depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 628 + depwi 0, 31,12, %r28 /* Clear any offset bits */ 629 + #endif 630 + 631 + /* Purge any old translation */ 632 + 633 + pdtlb 0(%r28) 634 + 635 + ldil L%dcache_stride, %r1 636 + ldw R%dcache_stride(%r1), %r1 637 + 638 + #ifdef CONFIG_64BIT 639 + depdi,z 1, 63-PAGE_SHIFT,1, %r25 640 + #else 641 + depwi,z 1, 31-PAGE_SHIFT,1, %r25 642 + #endif 643 + add %r28, %r25, %r25 644 + sub %r25, %r1, %r25 645 + 646 + 647 + 1: fdc,m %r1(%r28) 648 + fdc,m %r1(%r28) 649 + fdc,m %r1(%r28) 650 + fdc,m %r1(%r28) 651 + fdc,m %r1(%r28) 652 + fdc,m %r1(%r28) 653 + fdc,m %r1(%r28) 654 + fdc,m %r1(%r28) 655 + fdc,m %r1(%r28) 656 + fdc,m %r1(%r28) 657 + fdc,m %r1(%r28) 658 + fdc,m %r1(%r28) 659 + fdc,m %r1(%r28) 660 + fdc,m %r1(%r28) 661 + fdc,m %r1(%r28) 662 + cmpb,COND(<<) %r28, %r25,1b 663 + fdc,m %r1(%r28) 664 + 665 + sync 666 + bv %r0(%r2) 667 + pdtlb (%r25) 668 + .exit 669 + 670 + .procend 671 + ENDPROC(flush_dcache_page_asm) 672 + 673 + ENTRY(flush_icache_page_asm) 674 + .proc 675 + .callinfo NO_CALLS 676 + .entry 677 + 678 + ldil L%(TMPALIAS_MAP_START), %r28 679 + #ifdef CONFIG_64BIT 680 + #if (TMPALIAS_MAP_START >= 0x80000000) 681 + depdi 0, 31,32, %r28 /* clear any sign extension */ 682 + /* FIXME: page size dependend */ 683 + #endif 684 + extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 685 + depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 686 + depdi 0, 63,12, %r28 /* Clear any offset bits */ 687 + #else 688 + extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 689 + depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 690 + depwi 0, 31,12, %r28 /* Clear any offset bits */ 691 + #endif 692 + 693 + /* Purge any old translation */ 694 + 695 + pitlb (%sr0,%r28) 696 + 697 + ldil L%icache_stride, %r1 698 + ldw R%icache_stride(%r1), %r1 699 + 700 + #ifdef CONFIG_64BIT 701 + depdi,z 1, 63-PAGE_SHIFT,1, %r25 702 + #else 703 + depwi,z 1, 31-PAGE_SHIFT,1, %r25 704 + #endif 705 + add %r28, %r25, %r25 706 + sub %r25, %r1, %r25 707 + 708 + 709 + 1: fic,m %r1(%r28) 710 + fic,m %r1(%r28) 711 + fic,m %r1(%r28) 712 + fic,m %r1(%r28) 713 + fic,m %r1(%r28) 714 + fic,m %r1(%r28) 715 + fic,m %r1(%r28) 716 + fic,m %r1(%r28) 717 + fic,m %r1(%r28) 718 + fic,m %r1(%r28) 719 + fic,m %r1(%r28) 720 + fic,m %r1(%r28) 721 + fic,m %r1(%r28) 722 + fic,m %r1(%r28) 723 + fic,m %r1(%r28) 724 + cmpb,COND(<<) %r28, %r25,1b 725 + fic,m %r1(%r28) 726 + 727 + sync 728 + bv %r0(%r2) 729 + pitlb (%sr0,%r25) 730 + .exit 731 + 732 + .procend 733 + ENDPROC(flush_icache_page_asm) 734 + 611 735 ENTRY(flush_kernel_dcache_page_asm) 612 736 .proc 613 737 .callinfo NO_CALLS ··· 774 650 775 651 .procend 776 652 ENDPROC(flush_kernel_dcache_page_asm) 777 - 778 - ENTRY(flush_user_dcache_page) 779 - .proc 780 - .callinfo NO_CALLS 781 - .entry 782 - 783 - ldil 
L%dcache_stride, %r1 784 - ldw R%dcache_stride(%r1), %r23 785 - 786 - #ifdef CONFIG_64BIT 787 - depdi,z 1,63-PAGE_SHIFT,1, %r25 788 - #else 789 - depwi,z 1,31-PAGE_SHIFT,1, %r25 790 - #endif 791 - add %r26, %r25, %r25 792 - sub %r25, %r23, %r25 793 - 794 - 795 - 1: fdc,m %r23(%sr3, %r26) 796 - fdc,m %r23(%sr3, %r26) 797 - fdc,m %r23(%sr3, %r26) 798 - fdc,m %r23(%sr3, %r26) 799 - fdc,m %r23(%sr3, %r26) 800 - fdc,m %r23(%sr3, %r26) 801 - fdc,m %r23(%sr3, %r26) 802 - fdc,m %r23(%sr3, %r26) 803 - fdc,m %r23(%sr3, %r26) 804 - fdc,m %r23(%sr3, %r26) 805 - fdc,m %r23(%sr3, %r26) 806 - fdc,m %r23(%sr3, %r26) 807 - fdc,m %r23(%sr3, %r26) 808 - fdc,m %r23(%sr3, %r26) 809 - fdc,m %r23(%sr3, %r26) 810 - cmpb,COND(<<) %r26, %r25,1b 811 - fdc,m %r23(%sr3, %r26) 812 - 813 - sync 814 - bv %r0(%r2) 815 - nop 816 - .exit 817 - 818 - .procend 819 - ENDPROC(flush_user_dcache_page) 820 - 821 - ENTRY(flush_user_icache_page) 822 - .proc 823 - .callinfo NO_CALLS 824 - .entry 825 - 826 - ldil L%dcache_stride, %r1 827 - ldw R%dcache_stride(%r1), %r23 828 - 829 - #ifdef CONFIG_64BIT 830 - depdi,z 1, 63-PAGE_SHIFT,1, %r25 831 - #else 832 - depwi,z 1, 31-PAGE_SHIFT,1, %r25 833 - #endif 834 - add %r26, %r25, %r25 835 - sub %r25, %r23, %r25 836 - 837 - 838 - 1: fic,m %r23(%sr3, %r26) 839 - fic,m %r23(%sr3, %r26) 840 - fic,m %r23(%sr3, %r26) 841 - fic,m %r23(%sr3, %r26) 842 - fic,m %r23(%sr3, %r26) 843 - fic,m %r23(%sr3, %r26) 844 - fic,m %r23(%sr3, %r26) 845 - fic,m %r23(%sr3, %r26) 846 - fic,m %r23(%sr3, %r26) 847 - fic,m %r23(%sr3, %r26) 848 - fic,m %r23(%sr3, %r26) 849 - fic,m %r23(%sr3, %r26) 850 - fic,m %r23(%sr3, %r26) 851 - fic,m %r23(%sr3, %r26) 852 - fic,m %r23(%sr3, %r26) 853 - cmpb,COND(<<) %r26, %r25,1b 854 - fic,m %r23(%sr3, %r26) 855 - 856 - sync 857 - bv %r0(%r2) 858 - nop 859 - .exit 860 - 861 - .procend 862 - ENDPROC(flush_user_icache_page) 863 - 864 653 865 654 ENTRY(purge_kernel_dcache_page) 866 655 .proc ··· 817 780 .procend 818 781 ENDPROC(purge_kernel_dcache_page) 819 782 820 - #if 0 821 - /* Currently not used, but it still is a possible alternate 822 - * solution. 
823 - */ 824 - 825 - ENTRY(flush_alias_page) 826 - .proc 827 - .callinfo NO_CALLS 828 - .entry 829 - 830 - tophys_r1 %r26 831 - 832 - ldil L%(TMPALIAS_MAP_START), %r28 833 - #ifdef CONFIG_64BIT 834 - extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ 835 - depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 836 - depdi 0, 63,12, %r28 /* Clear any offset bits */ 837 - #else 838 - extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 839 - depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 840 - depwi 0, 31,12, %r28 /* Clear any offset bits */ 841 - #endif 842 - 843 - /* Purge any old translation */ 844 - 845 - pdtlb 0(%r28) 846 - 847 - ldil L%dcache_stride, %r1 848 - ldw R%dcache_stride(%r1), %r23 849 - 850 - #ifdef CONFIG_64BIT 851 - depdi,z 1, 63-PAGE_SHIFT,1, %r29 852 - #else 853 - depwi,z 1, 31-PAGE_SHIFT,1, %r29 854 - #endif 855 - add %r28, %r29, %r29 856 - sub %r29, %r23, %r29 857 - 858 - 1: fdc,m %r23(%r28) 859 - fdc,m %r23(%r28) 860 - fdc,m %r23(%r28) 861 - fdc,m %r23(%r28) 862 - fdc,m %r23(%r28) 863 - fdc,m %r23(%r28) 864 - fdc,m %r23(%r28) 865 - fdc,m %r23(%r28) 866 - fdc,m %r23(%r28) 867 - fdc,m %r23(%r28) 868 - fdc,m %r23(%r28) 869 - fdc,m %r23(%r28) 870 - fdc,m %r23(%r28) 871 - fdc,m %r23(%r28) 872 - fdc,m %r23(%r28) 873 - cmpb,COND(<<) %r28, %r29, 1b 874 - fdc,m %r23(%r28) 875 - 876 - sync 877 - bv %r0(%r2) 878 - nop 879 - .exit 880 - 881 - .procend 882 - #endif 883 783 884 784 .export flush_user_dcache_range_asm 885 785 ··· 839 865 .exit 840 866 841 867 .procend 842 - ENDPROC(flush_alias_page) 843 868 844 869 ENTRY(flush_kernel_dcache_range_asm) 845 870 .proc
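Shape of the new routines above: purge any stale alias translation (pdtlb/pitlb), then run a 16-way unrolled fdc,m or fic,m loop over the page at dcache_stride/icache_stride spacing, and sync. Unrolling aside, the loop computes this:

#define PAGE_SIZE 4096UL

/* Stand-in for the fdc,m (or fic,m) instruction, which flushes one
 * cache line and post-increments the address by the stride; a single
 * instruction on real hardware, not a function. */
static void flush_cache_line(unsigned long addr)
{
        (void)addr;
}

/* What the unrolled assembly computes: walk the aliased page in
 * stride-sized steps, flushing one line per step. */
static void flush_page_by_stride(unsigned long alias, unsigned long stride)
{
        unsigned long addr;

        for (addr = alias; addr < alias + PAGE_SIZE; addr += stride)
                flush_cache_line(addr);
}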
+11 -11
drivers/parisc/dino.c
··· 296 296 .outl = dino_out32 297 297 }; 298 298 299 - static void dino_mask_irq(unsigned int irq) 299 + static void dino_mask_irq(struct irq_data *d) 300 300 { 301 - struct dino_device *dino_dev = get_irq_chip_data(irq); 302 - int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 301 + struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); 302 + int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 303 303 304 - DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq); 304 + DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 305 305 306 306 /* Clear the matching bit in the IMR register */ 307 307 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq)); 308 308 __raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR); 309 309 } 310 310 311 - static void dino_unmask_irq(unsigned int irq) 311 + static void dino_unmask_irq(struct irq_data *d) 312 312 { 313 - struct dino_device *dino_dev = get_irq_chip_data(irq); 314 - int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 313 + struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); 314 + int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 315 315 u32 tmp; 316 316 317 - DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq); 317 + DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); 318 318 319 319 /* 320 320 ** clear pending IRQ bits ··· 346 346 } 347 347 348 348 static struct irq_chip dino_interrupt_type = { 349 - .name = "GSC-PCI", 350 - .unmask = dino_unmask_irq, 351 - .mask = dino_mask_irq, 349 + .name = "GSC-PCI", 350 + .irq_unmask = dino_unmask_irq, 351 + .irq_mask = dino_mask_irq, 352 352 }; 353 353 354 354
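The dino conversion above is the template repeated by the remaining parisc interrupt drivers in this merge (eisa, gsc, iosapic, superio below): each handler takes a struct irq_data, recovers its device via irq_data_get_irq_chip_data() instead of get_irq_chip_data(irq), reads the number from d->irq, and the irq_chip initializers move to the irq_-prefixed members. Distilled to a sketch (hypothetical device type, mask logic elided):

#include <linux/irq.h>

struct demo_device {            /* stands in for dino_device et al. */
        u32 imr;
};

static void demo_mask_irq(struct irq_data *d)
{
        struct demo_device *dev = irq_data_get_irq_chip_data(d);

        /* use d->irq where the old handler took an irq argument */
        dev->imr &= ~(1u << (d->irq & 31));
}

static struct irq_chip demo_interrupt_type = {
        .name     = "DEMO",
        .irq_mask = demo_mask_irq,  /* formerly .mask */
};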
+7 -5
drivers/parisc/eisa.c
··· 144 144 145 145 146 146 /* called by free irq */ 147 - static void eisa_mask_irq(unsigned int irq) 147 + static void eisa_mask_irq(struct irq_data *d) 148 148 { 149 + unsigned int irq = d->irq; 149 150 unsigned long flags; 150 151 151 152 EISA_DBG("disable irq %d\n", irq); ··· 165 164 } 166 165 167 166 /* called by request irq */ 168 - static void eisa_unmask_irq(unsigned int irq) 167 + static void eisa_unmask_irq(struct irq_data *d) 169 168 { 169 + unsigned int irq = d->irq; 170 170 unsigned long flags; 171 171 EISA_DBG("enable irq %d\n", irq); 172 172 ··· 185 183 } 186 184 187 185 static struct irq_chip eisa_interrupt_type = { 188 - .name = "EISA", 189 - .unmask = eisa_unmask_irq, 190 - .mask = eisa_mask_irq, 186 + .name = "EISA", 187 + .irq_unmask = eisa_unmask_irq, 188 + .irq_mask = eisa_mask_irq, 191 189 }; 192 190 193 191 static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
+11 -11
drivers/parisc/gsc.c
··· 105 105 return NO_IRQ; 106 106 } 107 107 108 - static void gsc_asic_mask_irq(unsigned int irq) 108 + static void gsc_asic_mask_irq(struct irq_data *d) 109 109 { 110 - struct gsc_asic *irq_dev = get_irq_chip_data(irq); 111 - int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32); 110 + struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d); 111 + int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32); 112 112 u32 imr; 113 113 114 - DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq, 114 + DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq, 115 115 irq_dev->name, imr); 116 116 117 117 /* Disable the IRQ line by clearing the bit in the IMR */ ··· 120 120 gsc_writel(imr, irq_dev->hpa + OFFSET_IMR); 121 121 } 122 122 123 - static void gsc_asic_unmask_irq(unsigned int irq) 123 + static void gsc_asic_unmask_irq(struct irq_data *d) 124 124 { 125 - struct gsc_asic *irq_dev = get_irq_chip_data(irq); 126 - int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32); 125 + struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d); 126 + int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32); 127 127 u32 imr; 128 128 129 - DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq, 129 + DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq, 130 130 irq_dev->name, imr); 131 131 132 132 /* Enable the IRQ line by setting the bit in the IMR */ ··· 140 140 } 141 141 142 142 static struct irq_chip gsc_asic_interrupt_type = { 143 - .name = "GSC-ASIC", 144 - .unmask = gsc_asic_unmask_irq, 145 - .mask = gsc_asic_mask_irq, 143 + .name = "GSC-ASIC", 144 + .irq_unmask = gsc_asic_unmask_irq, 145 + .irq_mask = gsc_asic_mask_irq, 146 146 }; 147 147 148 148 int gsc_assign_irq(struct irq_chip *type, void *data)
+20 -20
drivers/parisc/iosapic.c
··· 615 615 } 616 616 617 617 618 - static void iosapic_mask_irq(unsigned int irq) 618 + static void iosapic_mask_irq(struct irq_data *d) 619 619 { 620 620 unsigned long flags; 621 - struct vector_info *vi = get_irq_chip_data(irq); 621 + struct vector_info *vi = irq_data_get_irq_chip_data(d); 622 622 u32 d0, d1; 623 623 624 624 spin_lock_irqsave(&iosapic_lock, flags); ··· 628 628 spin_unlock_irqrestore(&iosapic_lock, flags); 629 629 } 630 630 631 - static void iosapic_unmask_irq(unsigned int irq) 631 + static void iosapic_unmask_irq(struct irq_data *d) 632 632 { 633 - struct vector_info *vi = get_irq_chip_data(irq); 633 + struct vector_info *vi = irq_data_get_irq_chip_data(d); 634 634 u32 d0, d1; 635 635 636 636 /* data is initialized by fixup_irq */ ··· 666 666 * enables their IRQ. It can lead to "interesting" race conditions 667 667 * in the driver initialization sequence. 668 668 */ 669 - DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq, 669 + DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", d->irq, 670 670 vi->eoi_addr, vi->eoi_data); 671 671 iosapic_eoi(vi->eoi_addr, vi->eoi_data); 672 672 } 673 673 674 - static void iosapic_eoi_irq(unsigned int irq) 674 + static void iosapic_eoi_irq(struct irq_data *d) 675 675 { 676 - struct vector_info *vi = get_irq_chip_data(irq); 676 + struct vector_info *vi = irq_data_get_irq_chip_data(d); 677 677 678 678 iosapic_eoi(vi->eoi_addr, vi->eoi_data); 679 - cpu_eoi_irq(irq); 679 + cpu_eoi_irq(d); 680 680 } 681 681 682 682 #ifdef CONFIG_SMP 683 - static int iosapic_set_affinity_irq(unsigned int irq, 684 - const struct cpumask *dest) 683 + static int iosapic_set_affinity_irq(struct irq_data *d, 684 + const struct cpumask *dest, bool force) 685 685 { 686 - struct vector_info *vi = get_irq_chip_data(irq); 686 + struct vector_info *vi = irq_data_get_irq_chip_data(d); 687 687 u32 d0, d1, dummy_d0; 688 688 unsigned long flags; 689 689 int dest_cpu; 690 690 691 - dest_cpu = cpu_check_affinity(irq, dest); 691 + dest_cpu = cpu_check_affinity(d, dest); 692 692 if (dest_cpu < 0) 693 693 return -1; 694 694 695 - cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu)); 696 - vi->txn_addr = txn_affinity_addr(irq, dest_cpu); 695 + cpumask_copy(d->affinity, cpumask_of(dest_cpu)); 696 + vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu); 697 697 698 698 spin_lock_irqsave(&iosapic_lock, flags); 699 699 /* d1 contains the destination CPU, so only want to set that ··· 708 708 #endif 709 709 710 710 static struct irq_chip iosapic_interrupt_type = { 711 - .name = "IO-SAPIC-level", 712 - .unmask = iosapic_unmask_irq, 713 - .mask = iosapic_mask_irq, 714 - .ack = cpu_ack_irq, 715 - .eoi = iosapic_eoi_irq, 711 + .name = "IO-SAPIC-level", 712 + .irq_unmask = iosapic_unmask_irq, 713 + .irq_mask = iosapic_mask_irq, 714 + .irq_ack = cpu_ack_irq, 715 + .irq_eoi = iosapic_eoi_irq, 716 716 #ifdef CONFIG_SMP 717 - .set_affinity = iosapic_set_affinity_irq, 717 + .irq_set_affinity = iosapic_set_affinity_irq, 718 718 #endif 719 719 }; 720 720
+7 -5
drivers/parisc/superio.c
··· 286 286 } 287 287 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init); 288 288 289 - static void superio_mask_irq(unsigned int irq) 289 + static void superio_mask_irq(struct irq_data *d) 290 290 { 291 + unsigned int irq = d->irq; 291 292 u8 r8; 292 293 293 294 if ((irq < 1) || (irq == 2) || (irq > 7)) { ··· 304 303 outb (r8,IC_PIC1+1); 305 304 } 306 305 307 - static void superio_unmask_irq(unsigned int irq) 306 + static void superio_unmask_irq(struct irq_data *d) 308 307 { 308 + unsigned int irq = d->irq; 309 309 u8 r8; 310 310 311 311 if ((irq < 1) || (irq == 2) || (irq > 7)) { ··· 322 320 } 323 321 324 322 static struct irq_chip superio_interrupt_type = { 325 - .name = SUPERIO, 326 - .unmask = superio_unmask_irq, 327 - .mask = superio_mask_irq, 323 + .name = SUPERIO, 324 + .irq_unmask = superio_unmask_irq, 325 + .irq_mask = superio_mask_irq, 328 326 }; 329 327 330 328 #ifdef DEBUG_SUPERIO_INIT