Merge master.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6

+373 -228
+10 -10
arch/parisc/kernel/cache.c
··· 29 29 #include <asm/processor.h> 30 30 #include <asm/sections.h> 31 31 32 - int split_tlb; 33 - int dcache_stride; 34 - int icache_stride; 32 + int split_tlb __read_mostly; 33 + int dcache_stride __read_mostly; 34 + int icache_stride __read_mostly; 35 35 EXPORT_SYMBOL(dcache_stride); 36 36 37 37 ··· 45 45 EXPORT_SYMBOL(pa_tlb_lock); 46 46 #endif 47 47 48 - struct pdc_cache_info cache_info; 48 + struct pdc_cache_info cache_info __read_mostly; 49 49 #ifndef CONFIG_PA20 50 - static struct pdc_btlb_info btlb_info; 50 + static struct pdc_btlb_info btlb_info __read_mostly; 51 51 #endif 52 52 53 53 #ifdef CONFIG_SMP 54 54 void 55 55 flush_data_cache(void) 56 56 { 57 - on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1); 57 + on_each_cpu(flush_data_cache_local, NULL, 1, 1); 58 58 } 59 59 void 60 60 flush_instruction_cache(void) 61 61 { 62 - on_each_cpu((void (*)(void *))flush_instruction_cache_local, NULL, 1, 1); 62 + on_each_cpu(flush_instruction_cache_local, NULL, 1, 1); 63 63 } 64 64 #endif 65 65 66 66 void 67 67 flush_cache_all_local(void) 68 68 { 69 - flush_instruction_cache_local(); 70 - flush_data_cache_local(); 69 + flush_instruction_cache_local(NULL); 70 + flush_data_cache_local(NULL); 71 71 } 72 72 EXPORT_SYMBOL(flush_cache_all_local); 73 73 ··· 332 332 } 333 333 334 334 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 335 - int parisc_cache_flush_threshold = FLUSH_THRESHOLD; 335 + int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; 336 336 337 337 void parisc_setup_cache_timing(void) 338 338 {
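The casts go away because on_each_cpu() wants a callback that takes a void * "info" argument; flush_data_cache_local() and flush_instruction_cache_local() now carry that signature themselves (see the include/asm-parisc/cache.h hunk below). A minimal sketch of the calling convention, with hypothetical names:

    #include <linux/smp.h>

    /* per-CPU worker; the void * parameter is the info pointer passed below */
    static void frob_local_state(void *info)
    {
            /* ... per-CPU work ... */
    }

    static void frob_all_cpus(void)
    {
            /* 2.6-era signature: on_each_cpu(func, info, retry, wait) */
            on_each_cpu(frob_local_state, NULL, 1, 1);
    }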
+9 -4
arch/parisc/kernel/drivers.c
··· 39 39 #include <asm/parisc-device.h> 40 40 41 41 /* See comments in include/asm-parisc/pci.h */ 42 - struct hppa_dma_ops *hppa_dma_ops; 42 + struct hppa_dma_ops *hppa_dma_ops __read_mostly; 43 43 EXPORT_SYMBOL(hppa_dma_ops); 44 44 45 45 static struct device root = { ··· 515 515 (iodc_data[5] << 8) | iodc_data[6]; 516 516 dev->hpa.name = parisc_pathname(dev); 517 517 dev->hpa.start = hpa; 518 - if (hpa == 0xf4000000 || hpa == 0xf6000000 || 519 - hpa == 0xf8000000 || hpa == 0xfa000000) { 518 + /* This is awkward. The STI spec says that gfx devices may occupy 519 + * 32MB or 64MB. Unfortunately, we don't know how to tell whether 520 + * it's the former or the latter. Assumptions either way can hurt us. 521 + */ 522 + if (hpa == 0xf4000000 || hpa == 0xf8000000) { 523 + dev->hpa.end = hpa + 0x03ffffff; 524 + } else if (hpa == 0xf6000000 || hpa == 0xfa000000) { 520 525 dev->hpa.end = hpa + 0x01ffffff; 521 526 } else { 522 527 dev->hpa.end = hpa + 0xfff; ··· 839 834 840 835 if (dev->num_addrs) { 841 836 int k; 842 - printk(", additional addresses: "); 837 + printk(", additional addresses: "); 843 838 for (k = 0; k < dev->num_addrs; k++) 844 839 printk("0x%lx ", dev->addr[k]); 845 840 }
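The new hpa.end values are plain "start + window size - 1" arithmetic for the two aperture sizes the STI comment mentions:

    64 MB window: 0xf4000000 + 0x04000000 - 1 = 0xf7ffffff  (offset 0x03ffffff)
    32 MB window: 0xf6000000 + 0x02000000 - 1 = 0xf7ffffff  (offset 0x01ffffff)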
+1 -1
arch/parisc/kernel/firmware.c
··· 80 80 81 81 /* Firmware needs to be initially set to narrow to determine the 82 82 * actual firmware width. */ 83 - int parisc_narrow_firmware = 1; 83 + int parisc_narrow_firmware __read_mostly = 1; 84 84 #endif 85 85 86 86 /* On most currently-supported platforms, IODC I/O calls are 32-bit calls
+1
arch/parisc/kernel/hardware.c
··· 551 551 {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"}, 552 552 {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"}, 553 553 {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"}, 554 + {HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"}, 554 555 {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"}, 555 556 {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"}, 556 557 {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
+3 -3
arch/parisc/kernel/inventory.c
··· 38 38 */ 39 39 #undef DEBUG_PAT 40 40 41 - int pdc_type = PDC_TYPE_ILLEGAL; 41 + int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; 42 42 43 43 void __init setup_pdc(void) 44 44 { ··· 120 120 * pdc info is bad in this case). 121 121 */ 122 122 123 - if ( ((start & (PAGE_SIZE - 1)) != 0) 124 - || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) { 123 + if (unlikely( ((start & (PAGE_SIZE - 1)) != 0) 124 + || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) { 125 125 126 126 panic("Memory range doesn't align with page size!\n"); 127 127 }
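unlikely() here, like the likely()/unlikely() hints in the pdc_chassis.c and power.c hunks below, is the usual branch-prediction annotation from <linux/compiler.h>; it boils down to:

    #define likely(x)    __builtin_expect(!!(x), 1)
    #define unlikely(x)  __builtin_expect(!!(x), 0)

    /* the wrapped test is a power-of-two alignment check:
     * (start & (PAGE_SIZE - 1)) != 0  holds exactly when start is not page aligned */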
+3 -3
arch/parisc/kernel/pci-dma.c
··· 33 33 #include <asm/uaccess.h> 34 34 #include <asm/tlbflush.h> /* for purge_tlb_*() macros */ 35 35 36 - static struct proc_dir_entry * proc_gsc_root = NULL; 36 + static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; 37 37 static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length); 38 - static unsigned long pcxl_used_bytes = 0; 39 - static unsigned long pcxl_used_pages = 0; 38 + static unsigned long pcxl_used_bytes __read_mostly = 0; 39 + static unsigned long pcxl_used_pages __read_mostly = 0; 40 40 41 41 extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ 42 42 static spinlock_t pcxl_res_lock;
+7 -6
arch/parisc/kernel/pdc_chassis.c
··· 30 30 #include <linux/kernel.h> 31 31 #include <linux/reboot.h> 32 32 #include <linux/notifier.h> 33 + #include <linux/cache.h> 33 34 34 35 #include <asm/pdc_chassis.h> 35 36 #include <asm/processor.h> ··· 39 38 40 39 41 40 #ifdef CONFIG_PDC_CHASSIS 42 - static int pdc_chassis_old = 0; 43 - static unsigned int pdc_chassis_enabled = 1; 41 + static int pdc_chassis_old __read_mostly = 0; 42 + static unsigned int pdc_chassis_enabled __read_mostly = 1; 44 43 45 44 46 45 /** ··· 133 132 { 134 133 #ifdef CONFIG_PDC_CHASSIS 135 134 int handle = 0; 136 - if (pdc_chassis_enabled) { 135 + if (likely(pdc_chassis_enabled)) { 137 136 DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__); 138 137 139 138 /* Let see if we have something to handle... */ ··· 143 142 printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n"); 144 143 handle = 1; 145 144 } 146 - else if (pdc_chassis_old) { 145 + else if (unlikely(pdc_chassis_old)) { 147 146 printk(KERN_INFO "Enabling old style chassis LED panel support.\n"); 148 147 handle = 1; 149 148 } ··· 179 178 /* Maybe we should do that in an other way ? */ 180 179 int retval = 0; 181 180 #ifdef CONFIG_PDC_CHASSIS 182 - if (pdc_chassis_enabled) { 181 + if (likely(pdc_chassis_enabled)) { 183 182 184 183 DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message); 185 184 ··· 215 214 } 216 215 } else retval = -1; 217 216 #else 218 - if (pdc_chassis_old) { 217 + if (unlikely(pdc_chassis_old)) { 219 218 switch (message) { 220 219 case PDC_CHASSIS_DIRECT_BSTART: 221 220 case PDC_CHASSIS_DIRECT_BCOMPLETE:
+3 -3
arch/parisc/kernel/perf.c
··· 66 66 uint8_t write_control; 67 67 }; 68 68 69 - static int perf_processor_interface = UNKNOWN_INTF; 70 - static int perf_enabled = 0; 69 + static int perf_processor_interface __read_mostly = UNKNOWN_INTF; 70 + static int perf_enabled __read_mostly = 0; 71 71 static spinlock_t perf_lock; 72 - struct parisc_device *cpu_device = NULL; 72 + struct parisc_device *cpu_device __read_mostly = NULL; 73 73 74 74 /* RDRs to write for PCX-W */ 75 75 static int perf_rdrs_W[] =
+1 -1
arch/parisc/kernel/process.c
··· 54 54 #include <asm/uaccess.h> 55 55 #include <asm/unwind.h> 56 56 57 - static int hlt_counter; 57 + static int hlt_counter __read_mostly; 58 58 59 59 /* 60 60 * Power off function, if any
+4 -4
arch/parisc/kernel/processor.c
··· 44 44 #include <asm/irq.h> /* for struct irq_region */ 45 45 #include <asm/parisc-device.h> 46 46 47 - struct system_cpuinfo_parisc boot_cpu_data; 47 + struct system_cpuinfo_parisc boot_cpu_data __read_mostly; 48 48 EXPORT_SYMBOL(boot_cpu_data); 49 49 50 - struct cpuinfo_parisc cpu_data[NR_CPUS]; 50 + struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly; 51 51 52 52 /* 53 53 ** PARISC CPU driver - claim "device" and initialize CPU data structures. ··· 378 378 return 0; 379 379 } 380 380 381 - static struct parisc_device_id processor_tbl[] = { 381 + static struct parisc_device_id processor_tbl[] __read_mostly = { 382 382 { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID }, 383 383 { 0, } 384 384 }; 385 385 386 - static struct parisc_driver cpu_driver = { 386 + static struct parisc_driver cpu_driver __read_mostly = { 387 387 .name = "CPU", 388 388 .id_table = processor_tbl, 389 389 .probe = processor_probe
+5 -5
arch/parisc/kernel/setup.c
··· 46 46 #include <asm/io.h> 47 47 #include <asm/setup.h> 48 48 49 - char command_line[COMMAND_LINE_SIZE]; 49 + char command_line[COMMAND_LINE_SIZE] __read_mostly; 50 50 51 51 /* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ 52 - struct proc_dir_entry * proc_runway_root = NULL; 53 - struct proc_dir_entry * proc_gsc_root = NULL; 54 - struct proc_dir_entry * proc_mckinley_root = NULL; 52 + struct proc_dir_entry * proc_runway_root __read_mostly = NULL; 53 + struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; 54 + struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; 55 55 56 56 #if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) 57 - int parisc_bus_is_phys = 1; /* Assume no IOMMU is present */ 57 + int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ 58 58 EXPORT_SYMBOL(parisc_bus_is_phys); 59 59 #endif 60 60
+7 -9
arch/parisc/kernel/smp.c
··· 39 39 #include <asm/atomic.h> 40 40 #include <asm/current.h> 41 41 #include <asm/delay.h> 42 - #include <asm/pgalloc.h> /* for flush_tlb_all() proto/macro */ 42 + #include <asm/tlbflush.h> 43 43 44 44 #include <asm/io.h> 45 45 #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ ··· 58 58 59 59 volatile struct task_struct *smp_init_current_idle_task; 60 60 61 - static volatile int cpu_now_booting = 0; /* track which CPU is booting */ 61 + static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */ 62 62 63 - static int parisc_max_cpus = 1; 63 + static int parisc_max_cpus __read_mostly = 1; 64 64 65 65 /* online cpus are ones that we've managed to bring up completely 66 66 * possible cpus are all valid cpu ··· 71 71 * empty in the beginning. 72 72 */ 73 73 74 - cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */ 75 - cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of Present CPUs */ 74 + cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */ 75 + cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */ 76 76 77 77 EXPORT_SYMBOL(cpu_online_map); 78 78 EXPORT_SYMBOL(cpu_possible_map); ··· 406 406 * as we want to ensure all TLB's flushed before proceeding. 407 407 */ 408 408 409 - extern void flush_tlb_all_local(void); 410 - 411 409 void 412 410 smp_flush_tlb_all(void) 413 411 { 414 - on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1); 412 + on_each_cpu(flush_tlb_all_local, NULL, 1, 1); 415 413 } 416 414 417 415 ··· 485 487 #endif 486 488 487 489 flush_cache_all_local(); /* start with known state */ 488 - flush_tlb_all_local(); 490 + flush_tlb_all_local(NULL); 489 491 490 492 local_irq_enable(); /* Interrupts have been off until now */ 491 493
+2 -2
arch/parisc/kernel/time.c
··· 36 36 /* xtime and wall_jiffies keep wall-clock time */ 37 37 extern unsigned long wall_jiffies; 38 38 39 - static long clocktick; /* timer cycles per tick */ 40 - static long halftick; 39 + static long clocktick __read_mostly; /* timer cycles per tick */ 40 + static long halftick __read_mostly; 41 41 42 42 #ifdef CONFIG_SMP 43 43 extern void smp_do_timer(struct pt_regs *regs);
+2 -1
arch/parisc/kernel/topology.c
··· 20 20 #include <linux/init.h> 21 21 #include <linux/smp.h> 22 22 #include <linux/cpu.h> 23 + #include <linux/cache.h> 23 24 24 - static struct cpu cpu_devices[NR_CPUS]; 25 + static struct cpu cpu_devices[NR_CPUS] __read_mostly; 25 26 26 27 static int __init topology_init(void) 27 28 {
+1 -1
arch/parisc/kernel/unaligned.c
··· 122 122 #define ERR_NOTHANDLED -1 123 123 #define ERR_PAGEFAULT -2 124 124 125 - int unaligned_enabled = 1; 125 + int unaligned_enabled __read_mostly = 1; 126 126 127 127 void die_if_kernel (char *str, struct pt_regs *regs, long err); 128 128
+1 -1
arch/parisc/kernel/unwind.c
··· 35 35 * we can call unwind_init as early in the bootup process as 36 36 * possible (before the slab allocator is initialized) 37 37 */ 38 - static struct unwind_table kernel_unwind_table; 38 + static struct unwind_table kernel_unwind_table __read_mostly; 39 39 static LIST_HEAD(unwind_tables); 40 40 41 41 static inline const struct unwind_table_entry *
+6 -9
arch/parisc/kernel/vmlinux.lds.S
··· 68 68 RODATA 69 69 70 70 /* writeable */ 71 - . = ALIGN(4096); /* Make sure this is paged aligned so 71 + . = ALIGN(4096); /* Make sure this is page aligned so 72 72 that we can properly leave these 73 73 as writable */ 74 74 data_start = .; ··· 104 104 /* PA-RISC locks requires 16-byte alignment */ 105 105 . = ALIGN(16); 106 106 .data.lock_aligned : { *(.data.lock_aligned) } 107 + 108 + /* rarely changed data like cpu maps */ 109 + . = ALIGN(16); 110 + .data.read_mostly : { *(.data.read_mostly) } 107 111 108 112 _edata = .; /* End of data section */ 109 113 ··· 198 194 #endif 199 195 } 200 196 201 - /* Stabs debugging sections. */ 202 - .stab 0 : { *(.stab) } 203 - .stabstr 0 : { *(.stabstr) } 204 - .stab.excl 0 : { *(.stab.excl) } 205 - .stab.exclstr 0 : { *(.stab.exclstr) } 206 - .stab.index 0 : { *(.stab.index) } 207 - .stab.indexstr 0 : { *(.stab.indexstr) } 208 - .comment 0 : { *(.comment) } 197 + STABS_DEBUG 209 198 .note 0 : { *(.note) } 210 199 211 200 }
+18 -11
arch/parisc/mm/init.c
··· 36 36 extern char __init_begin, __init_end; 37 37 38 38 #ifdef CONFIG_DISCONTIGMEM 39 - struct node_map_data node_data[MAX_NUMNODES]; 40 - bootmem_data_t bmem_data[MAX_NUMNODES]; 41 - unsigned char pfnnid_map[PFNNID_MAP_MAX]; 39 + struct node_map_data node_data[MAX_NUMNODES] __read_mostly; 40 + bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly; 41 + unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; 42 42 #endif 43 43 44 44 static struct resource data_resource = { ··· 58 58 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 59 59 }; 60 60 61 - static struct resource sysram_resources[MAX_PHYSMEM_RANGES]; 61 + static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; 62 62 63 63 /* The following array is initialized from the firmware specific 64 64 * information retrieved in kernel/inventory.c. 65 65 */ 66 66 67 - physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES]; 68 - int npmem_ranges; 67 + physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; 68 + int npmem_ranges __read_mostly; 69 69 70 70 #ifdef __LP64__ 71 71 #define MAX_MEM (~0UL) ··· 73 73 #define MAX_MEM (3584U*1024U*1024U) 74 74 #endif /* !__LP64__ */ 75 75 76 - static unsigned long mem_limit = MAX_MEM; 76 + static unsigned long mem_limit __read_mostly = MAX_MEM; 77 77 78 78 static void __init mem_limit_func(void) 79 79 { ··· 300 300 max_pfn = start_pfn + npages; 301 301 } 302 302 303 + /* IOMMU is always used to access "high mem" on those boxes 304 + * that can support enough mem that a PCI device couldn't 305 + * directly DMA to any physical addresses. 306 + * ISA DMA support will need to revisit this. 307 + */ 308 + max_low_pfn = max_pfn; 309 + 303 310 if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) { 304 311 printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n"); 305 312 BUG(); ··· 438 431 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ 439 432 & ~(VM_MAP_OFFSET-1))) 440 433 441 - void *vmalloc_start; 434 + void *vmalloc_start __read_mostly; 442 435 EXPORT_SYMBOL(vmalloc_start); 443 436 444 437 #ifdef CONFIG_PA11 445 - unsigned long pcxl_dma_start; 438 + unsigned long pcxl_dma_start __read_mostly; 446 439 #endif 447 440 448 441 void __init mem_init(void) ··· 482 475 return 0; 483 476 } 484 477 485 - unsigned long *empty_zero_page; 478 + unsigned long *empty_zero_page __read_mostly; 486 479 487 480 void show_mem(void) 488 481 { ··· 1005 998 void flush_tlb_all(void) 1006 999 { 1007 1000 spin_lock(&sid_lock); 1008 - flush_tlb_all_local(); 1001 + flush_tlb_all_local(NULL); 1009 1002 recycle_sids(); 1010 1003 spin_unlock(&sid_lock); 1011 1004 }
+63 -37
arch/parisc/mm/ioremap.c
··· 1 1 /* 2 2 * arch/parisc/mm/ioremap.c 3 3 * 4 - * Re-map IO memory to kernel address space so that we can access it. 5 - * This is needed for high PCI addresses that aren't mapped in the 6 - * 640k-1MB IO memory area on PC's 7 - * 8 4 * (C) Copyright 1995 1996 Linus Torvalds 9 5 * (C) Copyright 2001 Helge Deller <deller@gmx.de> 6 + * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org> 10 7 */ 11 8 12 9 #include <linux/vmalloc.h> ··· 11 14 #include <linux/module.h> 12 15 #include <asm/io.h> 13 16 #include <asm/pgalloc.h> 17 + #include <asm/tlbflush.h> 18 + #include <asm/cacheflush.h> 14 19 15 - static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, 16 - unsigned long phys_addr, unsigned long flags) 20 + static inline void 21 + remap_area_pte(pte_t *pte, unsigned long address, unsigned long size, 22 + unsigned long phys_addr, unsigned long flags) 17 23 { 18 - unsigned long end; 24 + unsigned long end, pfn; 25 + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | 26 + _PAGE_ACCESSED | flags); 19 27 20 28 address &= ~PMD_MASK; 29 + 21 30 end = address + size; 22 31 if (end > PMD_SIZE) 23 32 end = PMD_SIZE; 24 - if (address >= end) 25 - BUG(); 33 + 34 + BUG_ON(address >= end); 35 + 36 + pfn = phys_addr >> PAGE_SHIFT; 26 37 do { 27 - if (!pte_none(*pte)) { 28 - printk(KERN_ERR "remap_area_pte: page already exists\n"); 29 - BUG(); 30 - } 31 - set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW | 32 - _PAGE_DIRTY | _PAGE_ACCESSED | flags))); 38 + BUG_ON(!pte_none(*pte)); 39 + 40 + set_pte(pte, pfn_pte(pfn, pgprot)); 41 + 33 42 address += PAGE_SIZE; 34 - phys_addr += PAGE_SIZE; 43 + pfn++; 35 44 pte++; 36 45 } while (address && (address < end)); 37 46 } 38 47 39 - static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, 40 - unsigned long phys_addr, unsigned long flags) 48 + static inline int 49 + remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size, 50 + unsigned long phys_addr, unsigned long flags) 41 51 { 42 52 unsigned long end; 43 53 44 54 address &= ~PGDIR_MASK; 55 + 45 56 end = address + size; 46 57 if (end > PGDIR_SIZE) 47 58 end = PGDIR_SIZE; 59 + 60 + BUG_ON(address >= end); 61 + 48 62 phys_addr -= address; 49 - if (address >= end) 50 - BUG(); 51 63 do { 52 - pte_t * pte = pte_alloc_kernel(pmd, address); 64 + pte_t *pte = pte_alloc_kernel(pmd, address); 53 65 if (!pte) 54 66 return -ENOMEM; 55 - remap_area_pte(pte, address, end - address, address + phys_addr, flags); 67 + 68 + remap_area_pte(pte, address, end - address, 69 + address + phys_addr, flags); 70 + 56 71 address = (address + PMD_SIZE) & PMD_MASK; 57 72 pmd++; 58 73 } while (address && (address < end)); 74 + 59 75 return 0; 60 76 } 61 77 62 - #if (USE_HPPA_IOREMAP) 63 - static int remap_area_pages(unsigned long address, unsigned long phys_addr, 64 - unsigned long size, unsigned long flags) 78 + #if USE_HPPA_IOREMAP 79 + static int 80 + remap_area_pages(unsigned long address, unsigned long phys_addr, 81 + unsigned long size, unsigned long flags) 65 82 { 66 - int error; 67 - pgd_t * dir; 83 + pgd_t *dir; 84 + int error = 0; 68 85 unsigned long end = address + size; 69 86 87 + BUG_ON(address >= end); 88 + 70 89 phys_addr -= address; 71 - dir = pgd_offset(&init_mm, address); 90 + dir = pgd_offset_k(address); 91 + 72 92 flush_cache_all(); 73 - if (address >= end) 74 - BUG(); 93 + 75 94 do { 95 + pud_t *pud; 76 96 pmd_t *pmd; 77 - pmd = pmd_alloc(&init_mm, dir, address); 97 + 78 98 error = -ENOMEM; 99 + pud = pud_alloc(&init_mm, dir, address); 100 + if (!pud) 101 + break; 102 + 103 + pmd = pmd_alloc(&init_mm, pud, address); 79 104 if (!pmd) 80 105 break; 106 + 81 107 if (remap_area_pmd(pmd, address, end - address, 82 - phys_addr + address, flags)) 108 + phys_addr + address, flags)) 83 109 break; 110 + 84 111 error = 0; 85 112 address = (address + PGDIR_SIZE) & PGDIR_MASK; 86 113 dir++; 87 114 } while (address && (address < end)); 115 + 88 116 flush_tlb_all(); 117 + 89 118 return error; 90 119 } 91 120 #endif /* USE_HPPA_IOREMAP */ ··· 146 123 147 124 /* 148 125 * Remap an arbitrary physical address space into the kernel virtual 149 - * address space. Needed when the kernel wants to access high addresses 150 - * directly. 126 + * address space. 151 127 * 152 128 * NOTE! We need to allow non-page-aligned mappings too: we will obviously 153 129 * have to convert them into an offset in a page-aligned mapping, but the ··· 170 148 #endif 171 149 172 150 #else 173 - void * addr; 174 - struct vm_struct * area; 151 + void *addr; 152 + struct vm_struct *area; 175 153 unsigned long offset, last_addr; 176 154 177 155 /* Don't allow wraparound or zero size */ ··· 189 167 t_addr = __va(phys_addr); 190 168 t_end = t_addr + (size - 1); 191 169 192 - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) 170 + for (page = virt_to_page(t_addr); 171 + page <= virt_to_page(t_end); page++) { 193 172 if(!PageReserved(page)) 194 173 return NULL; 174 + } 195 175 } 196 176 197 177 /* ··· 209 185 area = get_vm_area(size, VM_IOREMAP); 210 186 if (!area) 211 187 return NULL; 188 + 212 189 addr = area->addr; 213 190 if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { 214 191 vfree(addr); 215 192 return NULL; 216 193 } 194 + 217 195 return (void __iomem *) (offset + (char *)addr); 218 196 #endif 219 197 }
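The rewritten remap_area_pages() walks the four-level pgd/pud/pmd/pte hierarchy and builds PTEs with pfn_pte() instead of the removed mk_pte_phys(). Callers are unaffected; a hypothetical use of the resulting mapping, for illustration only:

    #include <linux/errno.h>
    #include <asm/io.h>

    static int map_and_poke(unsigned long phys)   /* hypothetical helper */
    {
            void __iomem *regs = ioremap(phys, 0x1000);
            if (!regs)
                    return -ENOMEM;

            writel(0x1, regs + 0x10);     /* MMIO write through the new mapping */
            iounmap(regs);
            return 0;
    }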
+23 -7
drivers/parisc/dino.c
··· 83 83 ** bus number for each dino. 84 84 */ 85 85 86 - #define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA) 86 + #define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA) 87 + #define is_cujo(id) ((id)->hversion == 0x682) 87 88 88 89 #define DINO_IAR0 0x004 89 90 #define DINO_IODC_ADDR 0x008 ··· 125 124 126 125 #define DINO_IRQS 11 /* bits 0-10 are architected */ 127 126 #define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ 127 + #define DINO_LOCAL_IRQS (DINO_IRQS+1) 128 128 129 129 #define DINO_MASK_IRQ(x) (1<<(x)) 130 130 ··· 148 146 unsigned long txn_addr; /* EIR addr to generate interrupt */ 149 147 u32 txn_data; /* EIR data assign to each dino */ 150 148 u32 imr; /* IRQ's which are enabled */ 151 - int global_irq[12]; /* map IMR bit to global irq */ 149 + int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ 152 150 #ifdef DINO_DEBUG 153 151 unsigned int dino_irr0; /* save most recent IRQ line stat */ 154 152 #endif ··· 299 297 static void dino_disable_irq(unsigned int irq) 300 298 { 301 299 struct dino_device *dino_dev = irq_desc[irq].handler_data; 302 - int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq); 300 + int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 303 301 304 302 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); 305 303 ··· 311 309 static void dino_enable_irq(unsigned int irq) 312 310 { 313 311 struct dino_device *dino_dev = irq_desc[irq].handler_data; 314 - int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq); 312 + int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 315 313 u32 tmp; 316 314 317 315 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); ··· 436 434 437 435 dino_assign_irq(dino, irq, &dev->irq); 438 436 } 437 + 438 + 439 + /* 440 + * Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de) 441 + * (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...) 442 + */ 443 + static void __devinit quirk_cirrus_cardbus(struct pci_dev *dev) 444 + { 445 + u8 new_irq = dev->irq - 1; 446 + printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n", 447 + pci_name(dev), dev->irq, new_irq); 448 + dev->irq = new_irq; 449 + } 450 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); 451 + 439 452 440 453 static void __init 441 454 dino_bios_init(void) ··· 683 666 printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev)); 684 667 #endif 685 668 } else { 686 - 687 669 /* Adjust INT_LINE for that busses region */ 688 670 dino_assign_irq(dino_dev, dev->irq, &dev->irq); 689 671 } ··· 888 872 889 873 /* allocate I/O Port resource region */ 890 874 res = &dino_dev->hba.io_space; 891 - if (dev->id.hversion == 0x680 || is_card_dino(&dev->id)) { 875 + if (!is_cujo(&dev->id)) { 892 876 res->name = "Dino I/O Port"; 893 877 } else { 894 878 res->name = "Cujo I/O Port"; ··· 943 927 if (is_card_dino(&dev->id)) { 944 928 version = "3.x (card mode)"; 945 929 } else { 946 - if(dev->id.hversion == 0x680) { 930 + if (!is_cujo(&dev->id)) { 947 931 if (dev->id.hversion_rev < 4) { 948 932 version = dino_vers[dev->id.hversion_rev]; 949 933 }
+2 -2
drivers/parisc/eisa.c
··· 57 57 58 58 static DEFINE_SPINLOCK(eisa_irq_lock); 59 59 60 - void __iomem *eisa_eeprom_addr; 60 + void __iomem *eisa_eeprom_addr __read_mostly; 61 61 62 62 /* We can only have one EISA adapter in the system because neither 63 63 * implementation can be flexed. ··· 141 141 * in the furure. 142 142 */ 143 143 /* irq 13,8,2,1,0 must be edge */ 144 - static unsigned int eisa_irq_level; /* default to edge triggered */ 144 + static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */ 145 145 146 146 147 147 /* called by free irq */
+1 -1
drivers/parisc/eisa_eeprom.c
··· 48 48 } 49 49 50 50 static ssize_t eisa_eeprom_read(struct file * file, 51 - char *buf, size_t count, loff_t *ppos ) 51 + char __user *buf, size_t count, loff_t *ppos ) 52 52 { 53 53 unsigned char *tmp; 54 54 ssize_t ret;
+1 -1
drivers/parisc/lasi.c
··· 150 150 * 151 151 */ 152 152 153 - static unsigned long lasi_power_off_hpa; 153 + static unsigned long lasi_power_off_hpa __read_mostly; 154 154 155 155 static void lasi_power_off(void) 156 156 {
+91 -24
drivers/parisc/lba_pci.c
··· 167 167 168 168 /* non-postable I/O port space, densely packed */ 169 169 #define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL) 170 - static void __iomem *astro_iop_base; 170 + static void __iomem *astro_iop_base __read_mostly; 171 171 172 172 #define ELROY_HVERS 0x782 173 173 #define MERCURY_HVERS 0x783 ··· 695 695 } 696 696 } 697 697 } 698 - #else 699 - #define lba_claim_dev_resources(dev) 700 - #endif 701 698 699 + 700 + /* 701 + * truncate_pat_collision: Deal with overlaps or outright collisions 702 + * between PAT PDC reported ranges. 703 + * 704 + * Broken PA8800 firmware will report lmmio range that 705 + * overlaps with CPU HPA. Just truncate the lmmio range. 706 + * 707 + * BEWARE: conflicts with this lmmio range may be an 708 + * elmmio range which is pointing down another rope. 709 + * 710 + * FIXME: only deals with one collision per range...theoretically we 711 + * could have several. Supporting more than one collision will get messy. 712 + */ 713 + static unsigned long 714 + truncate_pat_collision(struct resource *root, struct resource *new) 715 + { 716 + unsigned long start = new->start; 717 + unsigned long end = new->end; 718 + struct resource *tmp = root->child; 719 + 720 + if (end <= start || start < root->start || !tmp) 721 + return 0; 722 + 723 + /* find first overlap */ 724 + while (tmp && tmp->end < start) 725 + tmp = tmp->sibling; 726 + 727 + /* no entries overlap */ 728 + if (!tmp) return 0; 729 + 730 + /* found one that starts behind the new one 731 + ** Don't need to do anything. 732 + */ 733 + if (tmp->start >= end) return 0; 734 + 735 + if (tmp->start <= start) { 736 + /* "front" of new one overlaps */ 737 + new->start = tmp->end + 1; 738 + 739 + if (tmp->end >= end) { 740 + /* AACCKK! totally overlaps! drop this range. */ 741 + return 1; 742 + } 743 + } 744 + 745 + if (tmp->end < end ) { 746 + /* "end" of new one overlaps */ 747 + new->end = tmp->start - 1; 748 + } 749 + 750 + printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] " 751 + "to [%lx,%lx]\n", 752 + start, end, 753 + new->start, new->end ); 754 + 755 + return 0; /* truncation successful */ 756 + } 757 + 758 + #else 759 + #define lba_claim_dev_resources(dev) do { } while (0) 760 + #define truncate_pat_collision(r,n) (0) 761 + #endif 702 762 703 763 /* 704 764 ** The algorithm is generic code. ··· 807 747 lba_dump_res(&ioport_resource, 2); 808 748 BUG(); 809 749 } 750 + /* advertize Host bridge resources to PCI bus */ 751 + bus->resource[0] = &(ldev->hba.io_space); 752 + i = 1; 810 753 811 754 if (ldev->hba.elmmio_space.start) { 812 755 err = request_resource(&iomem_resource, ··· 823 760 824 761 /* lba_dump_res(&iomem_resource, 2); */ 825 762 /* BUG(); */ 826 - } 763 + } else 764 + bus->resource[i++] = &(ldev->hba.elmmio_space); 827 765 } 828 766 829 - err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space)); 830 - if (err < 0) { 831 - /* FIXME overlaps with elmmio will fail here. 832 - * Need to prune (or disable) the distributed range. 833 - * 834 - * BEWARE: conflicts with this lmmio range may be 835 - * elmmio range which is pointing down another rope. 836 - */ 837 767 838 - printk("FAILED: lba_fixup_bus() request for " 768 + /* Overlaps with elmmio can (and should) fail here. 769 + * We will prune (or ignore) the distributed range. 770 + * 771 + * FIXME: SBA code should register all elmmio ranges first. 772 + that would take care of elmmio ranges routed 773 + * to a different rope (already discovered) from 774 + * getting registered *after* LBA code has already 775 + * registered it's distributed lmmio range. 776 + */ 777 + if (truncate_pat_collision(&iomem_resource, 778 + &(ldev->hba.lmmio_space))) { 779 + 780 + printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n", 781 + ldev->hba.lmmio_space.start, 782 + ldev->hba.lmmio_space.end); 783 + } else { 784 + err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space)); 785 + if (err < 0) { 786 + printk(KERN_ERR "FAILED: lba_fixup_bus() request for " 839 787 "lmmio_space [%lx/%lx]\n", 840 788 ldev->hba.lmmio_space.start, 841 789 ldev->hba.lmmio_space.end); 842 - /* lba_dump_res(&iomem_resource, 2); */ 790 + } else 791 + bus->resource[i++] = &(ldev->hba.lmmio_space); 843 792 } 844 793 845 794 #ifdef CONFIG_64BIT ··· 866 791 lba_dump_res(&iomem_resource, 2); 867 792 BUG(); 868 793 } 794 + bus->resource[i++] = &(ldev->hba.gmmio_space); 869 795 } 870 796 #endif 871 797 872 - /* advertize Host bridge resources to PCI bus */ 873 - bus->resource[0] = &(ldev->hba.io_space); 874 - bus->resource[1] = &(ldev->hba.lmmio_space); 875 - i=2; 876 - if (ldev->hba.elmmio_space.start) 877 - bus->resource[i++] = &(ldev->hba.elmmio_space); 878 - if (ldev->hba.gmmio_space.start) 879 - bus->resource[i++] = &(ldev->hba.gmmio_space); 880 - 881 798 } 882 799 883 800 list_for_each(ln, &bus->devices) {
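truncate_pat_collision() walks the iomem_resource tree by hand (root->child, tmp->sibling) so that a broken firmware-reported lmmio range can be clipped before request_resource() is asked to insert it; request_resource() itself simply rejects overlapping ranges. A sketch of that behaviour with a made-up range:

    #include <linux/ioport.h>
    #include <linux/kernel.h>

    static struct resource example_range = {      /* hypothetical range */
            .name  = "example lmmio",
            .start = 0xf0000000,
            .end   = 0xf3ffffff,
            .flags = IORESOURCE_MEM,
    };

    static void claim_example_range(void)
    {
            /* request_resource() returns a negative errno if the range
             * overlaps anything already registered under iomem_resource */
            if (request_resource(&iomem_resource, &example_range) < 0)
                    printk(KERN_WARNING "example lmmio overlaps, skipping\n");
    }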
+9 -9
drivers/parisc/led.c
··· 3 3 * 4 4 * (c) Copyright 2000 Red Hat Software 5 5 * (c) Copyright 2000 Helge Deller <hdeller@redhat.com> 6 - * (c) Copyright 2001-2004 Helge Deller <deller@gmx.de> 6 + * (c) Copyright 2001-2005 Helge Deller <deller@gmx.de> 7 7 * (c) Copyright 2001 Randolph Chung <tausq@debian.org> 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify ··· 56 56 relatively large amount of CPU time, some of the calculations can be 57 57 turned off with the following variables (controlled via procfs) */ 58 58 59 - static int led_type = -1; 59 + static int led_type __read_mostly = -1; 60 60 static unsigned char lastleds; /* LED state from most recent update */ 61 - static unsigned int led_heartbeat = 1; 62 - static unsigned int led_diskio = 1; 63 - static unsigned int led_lanrxtx = 1; 64 - static char lcd_text[32]; 65 - static char lcd_text_default[32]; 61 + static unsigned int led_heartbeat __read_mostly = 1; 62 + static unsigned int led_diskio __read_mostly = 1; 63 + static unsigned int led_lanrxtx __read_mostly = 1; 64 + static char lcd_text[32] __read_mostly; 65 + static char lcd_text_default[32] __read_mostly; 66 66 67 67 68 68 static struct workqueue_struct *led_wq; ··· 108 108 /* lcd_info is pre-initialized to the values needed to program KittyHawk LCD's 109 109 * HP seems to have used Sharp/Hitachi HD44780 LCDs most of the time. */ 110 110 static struct pdc_chassis_lcd_info_ret_block 111 - lcd_info __attribute__((aligned(8))) = 111 + lcd_info __attribute__((aligned(8))) __read_mostly = 112 112 { 113 113 .model = DISPLAY_MODEL_LCD, 114 114 .lcd_width = 16, ··· 144 144 device_initcall(start_task); 145 145 146 146 /* ptr to LCD/LED-specific function */ 147 - static void (*led_func_ptr) (unsigned char); 147 + static void (*led_func_ptr) (unsigned char) __read_mostly; 148 148 149 149 #ifdef CONFIG_PROC_FS 150 150 static int led_proc_read(char *page, char **start, off_t off, int count,
+32 -12
drivers/parisc/pdc_stable.c
··· 56 56 #include <asm/uaccess.h> 57 57 #include <asm/hardware.h> 58 58 59 - #define PDCS_VERSION "0.09" 59 + #define PDCS_VERSION "0.10" 60 60 61 61 #define PDCS_ADDR_PPRI 0x00 62 62 #define PDCS_ADDR_OSID 0x40 ··· 70 70 MODULE_LICENSE("GPL"); 71 71 MODULE_VERSION(PDCS_VERSION); 72 72 73 - static unsigned long pdcs_size = 0; 73 + static unsigned long pdcs_size __read_mostly; 74 74 75 75 /* This struct defines what we need to deal with a parisc pdc path entry */ 76 76 struct pdcspath_entry { ··· 194 194 return -EIO; 195 195 } 196 196 197 - entry->ready = 1; 197 + /* kobject is already registered */ 198 + entry->ready = 2; 198 199 199 200 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); 200 201 ··· 654 653 { 655 654 unsigned short i; 656 655 struct pdcspath_entry *entry; 656 + int err; 657 657 658 658 for (i = 0; (entry = pdcspath_entries[i]); i++) { 659 659 if (pdcspath_fetch(entry) < 0) 660 660 continue; 661 661 662 - kobject_set_name(&entry->kobj, "%s", entry->name); 662 + if ((err = kobject_set_name(&entry->kobj, "%s", entry->name))) 663 + return err; 663 664 kobj_set_kset_s(entry, paths_subsys); 664 - kobject_register(&entry->kobj); 665 - 665 + if ((err = kobject_register(&entry->kobj))) 666 + return err; 667 + 668 + /* kobject is now registered */ 669 + entry->ready = 2; 670 + 666 671 if (!entry->dev) 667 672 continue; 668 673 ··· 682 675 /** 683 676 * pdcs_unregister_pathentries - Routine called when unregistering the module. 684 677 */ 685 - static inline void __exit 678 + static inline void 686 679 pdcs_unregister_pathentries(void) 687 680 { 688 681 unsigned short i; 689 682 struct pdcspath_entry *entry; 690 683 691 684 for (i = 0; (entry = pdcspath_entries[i]); i++) 692 - if (entry->ready) 685 + if (entry->ready >= 2) 693 686 kobject_unregister(&entry->kobj); 694 687 } 695 688 ··· 711 704 712 705 /* For now we'll register the pdc subsys within this driver */ 713 706 if ((rc = firmware_register(&pdc_subsys))) 714 - return rc; 707 + goto fail_firmreg; 715 708 716 709 /* Don't forget the info entry */ 717 710 for (i = 0; (attr = pdcs_subsys_attrs[i]) && !error; i++) ··· 720 713 721 714 /* register the paths subsys as a subsystem of pdc subsys */ 722 715 kset_set_kset_s(&paths_subsys, pdc_subsys); 723 - subsystem_register(&paths_subsys); 716 + if ((rc= subsystem_register(&paths_subsys))) 717 + goto fail_subsysreg; 724 718 725 719 /* now we create all "files" for the paths subsys */ 726 - pdcs_register_pathentries(); 720 + if ((rc = pdcs_register_pathentries())) 721 + goto fail_pdcsreg; 722 + 723 + return rc; 727 724 728 - return 0; 725 + fail_pdcsreg: 726 + pdcs_unregister_pathentries(); 727 + subsystem_unregister(&paths_subsys); 728 + 729 + fail_subsysreg: 730 + firmware_unregister(&pdc_subsys); 731 + 732 + fail_firmreg: 733 + printk(KERN_INFO "PDC Stable Storage bailing out\n"); 734 + return rc; 729 735 } 730 736 731 737 static void __exit
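The init path now unwinds on failure instead of leaving half-registered kobjects behind; the fail_pdcsreg/fail_subsysreg/fail_firmreg labels follow the standard goto-cleanup ladder. A generic sketch of the idiom (all names hypothetical):

    #include <linux/errno.h>

    static int setup_a(void) { return 0; }        /* stand-ins for the three */
    static int setup_b(void) { return 0; }        /* registration steps      */
    static int setup_c(void) { return -ENOMEM; }
    static void teardown_a(void) { }
    static void teardown_b(void) { }

    static int my_init(void)
    {
            int rc;

            if ((rc = setup_a()))
                    goto fail_a;
            if ((rc = setup_b()))
                    goto fail_b;
            if ((rc = setup_c()))
                    goto fail_c;
            return 0;

    fail_c:                 /* undo in reverse order of setup */
            teardown_b();
    fail_b:
            teardown_a();
    fail_a:
            return rc;
    }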
+6 -6
drivers/parisc/power.c
··· 2 2 * linux/arch/parisc/kernel/power.c 3 3 * HP PARISC soft power switch support driver 4 4 * 5 - * Copyright (c) 2001-2002 Helge Deller <deller@gmx.de> 5 + * Copyright (c) 2001-2005 Helge Deller <deller@gmx.de> 6 6 * All rights reserved. 7 7 * 8 8 * ··· 102 102 103 103 static void poweroff(void) 104 104 { 105 - static int powering_off; 105 + static int powering_off __read_mostly; 106 106 107 107 if (powering_off) 108 108 return; ··· 113 113 114 114 115 115 /* local time-counter for shutdown */ 116 - static int shutdown_timer; 116 + static int shutdown_timer __read_mostly; 117 117 118 118 /* check, give feedback and start shutdown after one second */ 119 119 static void process_shutdown(void) ··· 139 139 DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0); 140 140 141 141 /* soft power switch enabled/disabled */ 142 - int pwrsw_enabled = 1; 142 + int pwrsw_enabled __read_mostly = 1; 143 143 144 144 /* 145 145 * On gecko style machines (e.g. 712/xx and 715/xx) ··· 149 149 */ 150 150 static void gecko_tasklet_func(unsigned long unused) 151 151 { 152 - if (!pwrsw_enabled) 152 + if (unlikely(!pwrsw_enabled)) 153 153 return; 154 154 155 155 if (__getDIAG(25) & 0x80000000) { ··· 173 173 { 174 174 unsigned long current_status; 175 175 176 - if (!pwrsw_enabled) 176 + if (unlikely(!pwrsw_enabled)) 177 177 return; 178 178 179 179 current_status = gsc_readl(soft_power_reg);
+1
drivers/parport/Kconfig
··· 121 121 tristate 122 122 default GSC 123 123 depends on PARPORT 124 + select PARPORT_NOT_PC 124 125 125 126 config PARPORT_SUNBPP 126 127 tristate "Sparc hardware (EXPERIMENTAL)"
+28 -37
drivers/video/stifb.c
··· 3 3 * Low level Frame buffer driver for HP workstations with 4 4 * STI (standard text interface) video firmware. 5 5 * 6 - * Copyright (C) 2001-2004 Helge Deller <deller@gmx.de> 6 + * Copyright (C) 2001-2005 Helge Deller <deller@gmx.de> 7 7 * Portions Copyright (C) 2001 Thomas Bogendoerfer <tsbogend@alpha.franken.de> 8 8 * 9 9 * Based on: ··· 73 73 #include "sticore.h" 74 74 75 75 /* REGION_BASE(fb_info, index) returns the virtual address for region <index> */ 76 - #ifdef __LP64__ 77 - #define REGION_BASE(fb_info, index) \ 78 - (fb_info->sti->glob_cfg->region_ptrs[index] | 0xffffffff00000000) 79 - #else 80 - #define REGION_BASE(fb_info, index) \ 81 - fb_info->sti->glob_cfg->region_ptrs[index] 82 - #endif 76 + #define REGION_BASE(fb_info, index) \ 77 + F_EXTEND(fb_info->sti->glob_cfg->region_ptrs[index]) 83 78 84 79 #define NGLEDEVDEPROM_CRT_REGION 1 80 + 81 + #define NR_PALETTE 256 85 82 86 83 typedef struct { 87 84 __s32 video_config_reg; ··· 109 112 ngle_rom_t ngle_rom; 110 113 struct sti_struct *sti; 111 114 int deviceSpecificConfig; 112 - u32 pseudo_palette[256]; 115 + u32 pseudo_palette[16]; 113 116 }; 114 117 115 118 static int __initdata stifb_bpp_pref[MAX_STI_ROMS]; ··· 349 352 #define IS_888_DEVICE(fb) \ 350 353 (!(IS_24_DEVICE(fb))) 351 354 352 - #define GET_FIFO_SLOTS(fb, cnt, numslots) \ 353 - { while (cnt < numslots) \ 355 + #define GET_FIFO_SLOTS(fb, cnt, numslots) \ 356 + { while (cnt < numslots) \ 354 357 cnt = READ_WORD(fb, REG_34); \ 355 - cnt -= numslots; \ 358 + cnt -= numslots; \ 356 359 } 357 360 358 361 #define IndexedDcd 0 /* Pixel data is indexed (pseudo) color */ ··· 992 995 struct stifb_info *fb = (struct stifb_info *) info; 993 996 u32 color; 994 997 995 - if (regno >= 256) /* no. of hw registers */ 998 + if (regno >= NR_PALETTE) 996 999 return 1; 997 1000 998 1001 red >>= 8; ··· 1002 1005 DEBUG_OFF(); 1003 1006 1004 1007 START_IMAGE_COLORMAP_ACCESS(fb); 1005 - 1006 - if (fb->info.var.grayscale) { 1008 + 1009 + if (unlikely(fb->info.var.grayscale)) { 1007 1010 /* gray = 0.30*R + 0.59*G + 0.11*B */ 1008 1011 color = ((red * 77) + 1009 1012 (green * 151) + ··· 1014 1017 (blue)); 1015 1018 } 1016 1019 1017 - if (info->var.bits_per_pixel == 32) { 1018 - ((u32 *)(info->pseudo_palette))[regno] = 1019 - (red << info->var.red.offset) | 1020 - (green << info->var.green.offset) | 1021 - (blue << info->var.blue.offset); 1022 - } else { 1023 - ((u32 *)(info->pseudo_palette))[regno] = regno; 1020 + if (fb->info.fix.visual == FB_VISUAL_DIRECTCOLOR) { 1021 + struct fb_var_screeninfo *var = &fb->info.var; 1022 + if (regno < 16) 1023 + ((u32 *)fb->info.pseudo_palette)[regno] = 1024 + regno << var->red.offset | 1025 + regno << var->green.offset | 1026 + regno << var->blue.offset; 1024 1027 } 1025 1028 1026 1029 WRITE_IMAGE_COLOR(fb, regno, color); 1027 - 1030 + 1028 1031 if (fb->id == S9000_ID_HCRX) { 1029 1032 NgleLutBltCtl lutBltCtl; 1030 1033 ··· 1063 1066 case S9000_ID_HCRX: 1064 1067 HYPER_ENABLE_DISABLE_DISPLAY(fb, enable); 1065 1068 break; 1066 - case S9000_ID_A1659A:; /* fall through */ 1067 - case S9000_ID_TIMBER:; 1068 - case CRX24_OVERLAY_PLANES:; 1069 + case S9000_ID_A1659A: /* fall through */ 1070 + case S9000_ID_TIMBER: 1071 + case CRX24_OVERLAY_PLANES: 1069 1072 default: 1070 1073 ENABLE_DISABLE_DISPLAY(fb, enable); 1071 1074 break; ··· 1247 1250 memset(&fb->ngle_rom, 0, sizeof(fb->ngle_rom)); 1248 1251 if ((fb->sti->regions_phys[0] & 0xfc000000) == 1249 1252 (fb->sti->regions_phys[2] & 0xfc000000)) 1250 - sti_rom_address = fb->sti->regions_phys[0]; 1253 + sti_rom_address = F_EXTEND(fb->sti->regions_phys[0]); 1251 1254 else 1252 - sti_rom_address = fb->sti->regions_phys[1]; 1253 - #ifdef __LP64__ 1254 - sti_rom_address |= 0xffffffff00000000; 1255 - #endif 1255 + sti_rom_address = F_EXTEND(fb->sti->regions_phys[1]); 1256 + 1256 1257 fb->deviceSpecificConfig = gsc_readl(sti_rom_address); 1257 1258 if (IS_24_DEVICE(fb)) { 1258 1259 if (bpp_pref == 8 || bpp_pref == 32) ··· 1310 1315 break; 1311 1316 case 32: 1312 1317 fix->type = FB_TYPE_PACKED_PIXELS; 1313 - fix->visual = FB_VISUAL_TRUECOLOR; 1318 + fix->visual = FB_VISUAL_DIRECTCOLOR; 1314 1319 var->red.length = var->green.length = var->blue.length = var->transp.length = 8; 1315 1320 var->blue.offset = 0; 1316 1321 var->green.offset = 8; ··· 1332 1337 info->pseudo_palette = &fb->pseudo_palette; 1333 1338 1334 1339 /* This has to been done !!! */ 1335 - fb_alloc_cmap(&info->cmap, 256, 0); 1340 + fb_alloc_cmap(&info->cmap, NR_PALETTE, 0); 1336 1341 stifb_init_display(fb); 1337 1342 1338 1343 if (!request_mem_region(fix->smem_start, fix->smem_len, "stifb fb")) { ··· 1483 1488 MODULE_AUTHOR("Helge Deller <deller@gmx.de>, Thomas Bogendoerfer <tsbogend@alpha.franken.de>"); 1484 1489 MODULE_DESCRIPTION("Framebuffer driver for HP's NGLE series graphics cards in HP PARISC machines"); 1485 1490 MODULE_LICENSE("GPL v2"); 1486 - 1487 - MODULE_PARM(bpp, "i"); 1488 - MODULE_PARM_DESC(mem, "Bits per pixel (default: 8)"); 1489 -
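REGION_BASE() and the ROM address lookup now go through F_EXTEND() instead of open-coding the upper-bits OR. Judging from the #ifdef __LP64__ block it replaces, the macro amounts to roughly the following (a sketch, not the exact header definition):

    #ifdef __LP64__
    #define F_EXTEND(x)  ((unsigned long)(x) | 0xffffffff00000000UL)
    #else
    #define F_EXTEND(x)  ((unsigned long)(x))
    #endif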
+4 -4
include/asm-parisc/cache.h
··· 29 29 30 30 #define SMP_CACHE_BYTES L1_CACHE_BYTES 31 31 32 - extern void flush_data_cache_local(void); /* flushes local data-cache only */ 33 - extern void flush_instruction_cache_local(void); /* flushes local code-cache only */ 32 + extern void flush_data_cache_local(void *); /* flushes local data-cache only */ 33 + extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */ 34 34 #ifdef CONFIG_SMP 35 35 extern void flush_data_cache(void); /* flushes data-cache only (all processors) */ 36 36 extern void flush_instruction_cache(void); /* flushes i-cache only (all processors) */ 37 37 #else 38 - #define flush_data_cache flush_data_cache_local 39 - #define flush_instruction_cache flush_instruction_cache_local 38 + #define flush_data_cache() flush_data_cache_local(NULL) 39 + #define flush_instruction_cache() flush_instruction_cache_local(NULL) 40 40 #endif 41 41 42 42 extern void parisc_cache_init(void); /* initializes cache-flushing */
+1 -1
include/asm-parisc/io.h
··· 41 41 #define __raw_check_addr(addr) \ 42 42 if (((unsigned long)addr >> NYBBLE_SHIFT) != 0xe) \ 43 43 __raw_bad_addr(addr); \ 44 - addr = (void *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT)); 44 + addr = (void __iomem *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT)); 45 45 #else 46 46 #define gsc_check_addr(addr) 47 47 #define __raw_check_addr(addr)
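The __iomem cast here, like the __user added to eisa_eeprom_read() above, only affects sparse (__CHECKER__) builds; in normal compiles both annotations expand to nothing. Roughly:

    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __iomem  __attribute__((noderef, address_space(2)))
    #else
    # define __user
    # define __iomem
    #endif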
+7
include/asm-parisc/page.h
··· 135 135 #define pfn_valid(pfn) ((pfn) < max_mapnr) 136 136 #endif /* CONFIG_DISCONTIGMEM */ 137 137 138 + #ifdef CONFIG_HUGETLB_PAGE 139 + #define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ 140 + #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 141 + #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 142 + #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 143 + #endif 144 + 138 145 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 139 146 140 147 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
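With parisc's 4 kB base pages (PAGE_SHIFT == 12) the new constants work out to:

    HPAGE_SIZE         = 1UL << 22          = 0x400000   (4 MB, matching the comment)
    HPAGE_MASK         = ~(HPAGE_SIZE - 1)  = 0xffc00000 (in the low 32 bits)
    HUGETLB_PAGE_ORDER = 22 - 12            = 10         (1024 base pages per huge page)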
+7 -1
include/asm-parisc/pci.h
··· 84 84 /* 85 85 ** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses. 86 86 ** See pcibios.c for more conversions used by Generic PCI code. 87 + ** 88 + ** Platform characteristics/firmware guarantee that 89 + ** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO 90 + ** (2) PA_VIEW == IO_VIEW for GMMIO 87 91 */ 88 92 #define PCI_BUS_ADDR(hba,a) (PCI_IS_LMMIO(hba,a) \ 89 93 ? ((a) - hba->lmmio_space_offset) /* mangle LMMIO */ \ 90 94 : (a)) /* GMMIO */ 91 - #define PCI_HOST_ADDR(hba,a) ((a) + hba->lmmio_space_offset) 95 + #define PCI_HOST_ADDR(hba,a) (((a) & PCI_F_EXTEND) == 0 \ 96 + ? (a) + hba->lmmio_space_offset \ 97 + : (a)) 92 98 93 99 #else /* !CONFIG_64BIT */ 94 100
+9 -9
include/asm-parisc/processor.h
··· 144 144 }) 145 145 146 146 #define INIT_THREAD { \ 147 - regs: { gr: { 0, }, \ 148 - fr: { 0, }, \ 149 - sr: { 0, }, \ 150 - iasq: { 0, }, \ 151 - iaoq: { 0, }, \ 152 - cr27: 0, \ 147 + .regs = { .gr = { 0, }, \ 148 + .fr = { 0, }, \ 149 + .sr = { 0, }, \ 150 + .iasq = { 0, }, \ 151 + .iaoq = { 0, }, \ 152 + .cr27 = 0, \ 153 153 }, \ 154 - task_size: DEFAULT_TASK_SIZE, \ 155 - map_base: DEFAULT_MAP_BASE, \ 156 - flags: 0 \ 154 + .task_size = DEFAULT_TASK_SIZE, \ 155 + .map_base = DEFAULT_MAP_BASE, \ 156 + .flags = 0 \ 157 157 } 158 158 159 159 /*
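The INIT_THREAD hunk swaps the old GCC-only "label:" initializer syntax for standard C99 designated initializers; the object is identical, only the spelling changes:

    struct point { int x, y; };

    struct point a = { x: 1, y: 2 };       /* legacy GCC extension */
    struct point b = { .x = 1, .y = 2 };   /* C99 form, as used above */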
+1
include/asm-parisc/tlbflush.h
··· 22 22 #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock) 23 23 24 24 extern void flush_tlb_all(void); 25 + extern void flush_tlb_all_local(void *); 25 26 26 27 /* 27 28 * flush_tlb_mm()
+1 -1
include/linux/cache.h
··· 13 13 #define SMP_CACHE_BYTES L1_CACHE_BYTES 14 14 #endif 15 15 16 - #if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) 16 + #if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17 17 #define __read_mostly __attribute__((__section__(".data.read_mostly"))) 18 18 #else 19 19 #define __read_mostly
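Adding CONFIG_PARISC here is what makes all of the __read_mostly annotations in this merge take effect: the variable lands in the .data.read_mostly section that the vmlinux.lds.S hunk now collects, keeping rarely written data off cachelines that see frequent stores. A minimal sketch with a hypothetical variable:

    #include <linux/cache.h>

    /* on parisc this now expands to
     * __attribute__((__section__(".data.read_mostly"))) */
    static int my_feature_enabled __read_mostly = 1;   /* hypothetical flag */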
+2 -2
sound/oss/harmony.c
··· 1236 1236 } 1237 1237 1238 1238 /* Set the HPA of harmony */ 1239 - harmony.hpa = (struct harmony_hpa *)dev->hpa; 1239 + harmony.hpa = (struct harmony_hpa *)dev->hpa.start; 1240 1240 harmony.dev = dev; 1241 1241 1242 1242 /* Grab the ID and revision from the device */ ··· 1250 1250 1251 1251 printk(KERN_INFO "Lasi Harmony Audio driver " HARMONY_VERSION ", " 1252 1252 "h/w id %i, rev. %i at 0x%lx, IRQ %i\n", 1253 - id, rev, dev->hpa, harmony.dev->irq); 1253 + id, rev, dev->hpa.start, harmony.dev->irq); 1254 1254 1255 1255 /* Make sure the control bit isn't set, although I don't think it 1256 1256 ever is. */
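dev->hpa is now a struct resource rather than a bare address, hence the ->hpa.start accesses. The fields this merge relies on (name/start/end in drivers.c, the parent/sibling/child walk in lba_pci.c) correspond to the generic resource layout, roughly:

    struct resource {
            const char *name;
            unsigned long start, end;       /* inclusive range */
            unsigned long flags;
            struct resource *parent, *sibling, *child;
    };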