Merge HEAD from oss.sgi.com:/oss/git/linux-2.6.git

+406 -267
+2 -2
Documentation/cpusets.txt
··· 135 The implementation of cpusets requires a few, simple hooks 136 into the rest of the kernel, none in performance critical paths: 137 138 - - in main/init.c, to initialize the root cpuset at system boot. 139 - in fork and exit, to attach and detach a task from its cpuset. 140 - in sched_setaffinity, to mask the requested CPUs by what's 141 allowed in that tasks cpuset. ··· 146 and related changes in both sched.c and arch/ia64/kernel/domain.c 147 - in the mbind and set_mempolicy system calls, to mask the requested 148 Memory Nodes by what's allowed in that tasks cpuset. 149 - - in page_alloc, to restrict memory to allowed nodes. 150 - in vmscan.c, to restrict page recovery to the current cpuset. 151 152 In addition a new file system, of type "cpuset" may be mounted,
··· 135 The implementation of cpusets requires a few, simple hooks 136 into the rest of the kernel, none in performance critical paths: 137 138 + - in init/main.c, to initialize the root cpuset at system boot. 139 - in fork and exit, to attach and detach a task from its cpuset. 140 - in sched_setaffinity, to mask the requested CPUs by what's 141 allowed in that tasks cpuset. ··· 146 and related changes in both sched.c and arch/ia64/kernel/domain.c 147 - in the mbind and set_mempolicy system calls, to mask the requested 148 Memory Nodes by what's allowed in that tasks cpuset. 149 + - in page_alloc.c, to restrict memory to allowed nodes. 150 - in vmscan.c, to restrict page recovery to the current cpuset. 151 152 In addition a new file system, of type "cpuset" may be mounted,
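Reviewer note: the sched_setaffinity hook listed above is essentially a cpumask intersection. A hedged sketch of that masking step using the 2.6-era cpumask helpers; requested_mask, cpuset_cpus_allowed and p are placeholder names, not the kernel's actual variables:

    /* Sketch only: intersect the caller's requested CPUs with the CPUs
     * the task's cpuset allows before handing them to the scheduler.
     * Placeholder names, not the real kernel code. */
    cpumask_t effective;

    cpus_and(effective, requested_mask, cpuset_cpus_allowed);
    if (cpus_empty(effective))
            return -EINVAL;         /* nothing usable left after masking */
    return set_cpus_allowed(p, effective);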
+3 -3
Documentation/laptop-mode.txt
··· 3 4 Document Author: Bart Samwel (bart@samwel.tk) 5 Date created: January 2, 2004 6 - Last modified: July 10, 2004 7 8 Introduction 9 ------------ ··· 33 laptop mode will automatically be started when you're on battery. For 34 your convenience, a tarball containing an installer can be downloaded at: 35 36 - http://www.xs4all.nl/~bsamwel/laptop_mode/tools 37 38 To configure laptop mode, you need to edit the configuration file, which is 39 located in /etc/default/laptop-mode on Debian-based systems, or in ··· 912 exit(0); 913 } 914 915 - int main(int ac, char **av) 916 { 917 int fd; 918 char *disk = 0;
··· 3 4 Document Author: Bart Samwel (bart@samwel.tk) 5 Date created: January 2, 2004 6 + Last modified: December 06, 2004 7 8 Introduction 9 ------------ ··· 33 laptop mode will automatically be started when you're on battery. For 34 your convenience, a tarball containing an installer can be downloaded at: 35 36 + http://www.xs4all.nl/~bsamwel/laptop_mode/tools/ 37 38 To configure laptop mode, you need to edit the configuration file, which is 39 located in /etc/default/laptop-mode on Debian-based systems, or in ··· 912 exit(0); 913 } 914 915 + int main(int argc, char **argv) 916 { 917 int fd; 918 char *disk = 0;
+1 -1
MAINTAINERS
··· 2487 M: lethal@linux-sh.org 2488 P: Kazumoto Kojima 2489 M: kkojima@rr.iij4u.or.jp 2490 - L: linux-sh@m17n.org 2491 W: http://www.linux-sh.org 2492 W: http://www.m17n.org/linux-sh/ 2493 W: http://www.rr.iij4u.or.jp/~kkojima/linux-sh4.html
··· 2487 M: lethal@linux-sh.org 2488 P: Kazumoto Kojima 2489 M: kkojima@rr.iij4u.or.jp 2490 + L: linuxsh-dev@lists.sourceforge.net 2491 W: http://www.linux-sh.org 2492 W: http://www.m17n.org/linux-sh/ 2493 W: http://www.rr.iij4u.or.jp/~kkojima/linux-sh4.html
+1 -1
arch/i386/boot/Makefile
··· 101 if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi 102 103 install: 104 - sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
··· 101 if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi 102 103 install: 104 + sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+1 -1
arch/i386/mm/pageattr.c
··· 224 return; 225 if (!enable) 226 mutex_debug_check_no_locks_freed(page_address(page), 227 - page_address(page+numpages)); 228 229 /* the return value is ignored - the calls cannot fail, 230 * large pages are disabled at boot time.
··· 224 return; 225 if (!enable) 226 mutex_debug_check_no_locks_freed(page_address(page), 227 + numpages * PAGE_SIZE); 228 229 /* the return value is ignored - the calls cannot fail, 230 * large pages are disabled at boot time.
+10 -10
arch/parisc/kernel/cache.c
··· 29 #include <asm/processor.h> 30 #include <asm/sections.h> 31 32 - int split_tlb; 33 - int dcache_stride; 34 - int icache_stride; 35 EXPORT_SYMBOL(dcache_stride); 36 37 ··· 45 EXPORT_SYMBOL(pa_tlb_lock); 46 #endif 47 48 - struct pdc_cache_info cache_info; 49 #ifndef CONFIG_PA20 50 - static struct pdc_btlb_info btlb_info; 51 #endif 52 53 #ifdef CONFIG_SMP 54 void 55 flush_data_cache(void) 56 { 57 - on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1); 58 } 59 void 60 flush_instruction_cache(void) 61 { 62 - on_each_cpu((void (*)(void *))flush_instruction_cache_local, NULL, 1, 1); 63 } 64 #endif 65 66 void 67 flush_cache_all_local(void) 68 { 69 - flush_instruction_cache_local(); 70 - flush_data_cache_local(); 71 } 72 EXPORT_SYMBOL(flush_cache_all_local); 73 ··· 332 } 333 334 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 335 - int parisc_cache_flush_threshold = FLUSH_THRESHOLD; 336 337 void parisc_setup_cache_timing(void) 338 {
··· 29 #include <asm/processor.h> 30 #include <asm/sections.h> 31 32 + int split_tlb __read_mostly; 33 + int dcache_stride __read_mostly; 34 + int icache_stride __read_mostly; 35 EXPORT_SYMBOL(dcache_stride); 36 37 ··· 45 EXPORT_SYMBOL(pa_tlb_lock); 46 #endif 47 48 + struct pdc_cache_info cache_info __read_mostly; 49 #ifndef CONFIG_PA20 50 + static struct pdc_btlb_info btlb_info __read_mostly; 51 #endif 52 53 #ifdef CONFIG_SMP 54 void 55 flush_data_cache(void) 56 { 57 + on_each_cpu(flush_data_cache_local, NULL, 1, 1); 58 } 59 void 60 flush_instruction_cache(void) 61 { 62 + on_each_cpu(flush_instruction_cache_local, NULL, 1, 1); 63 } 64 #endif 65 66 void 67 flush_cache_all_local(void) 68 { 69 + flush_instruction_cache_local(NULL); 70 + flush_data_cache_local(NULL); 71 } 72 EXPORT_SYMBOL(flush_cache_all_local); 73 ··· 332 } 333 334 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 335 + int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; 336 337 void parisc_setup_cache_timing(void) 338 {
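Reviewer note: most of the parisc hunks in this merge add the __read_mostly annotation, which moves rarely-written globals into a dedicated .data.read_mostly output section (added to the parisc linker script further down) so they stop sharing cache lines with write-hot data. A minimal sketch of the mechanism, assuming the usual 2.6-era definition in <linux/cache.h>; the exact definition can differ per architecture and kernel version:

    /* Roughly what the annotation expands to: a section attribute that
     * the linker script collects into .data.read_mostly. */
    #define __read_mostly  __attribute__((__section__(".data.read_mostly")))

    static int split_tlb __read_mostly;   /* written once at boot, read frequently afterwards */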
+9 -4
arch/parisc/kernel/drivers.c
··· 39 #include <asm/parisc-device.h> 40 41 /* See comments in include/asm-parisc/pci.h */ 42 - struct hppa_dma_ops *hppa_dma_ops; 43 EXPORT_SYMBOL(hppa_dma_ops); 44 45 static struct device root = { ··· 515 (iodc_data[5] << 8) | iodc_data[6]; 516 dev->hpa.name = parisc_pathname(dev); 517 dev->hpa.start = hpa; 518 - if (hpa == 0xf4000000 || hpa == 0xf6000000 || 519 - hpa == 0xf8000000 || hpa == 0xfa000000) { 520 dev->hpa.end = hpa + 0x01ffffff; 521 } else { 522 dev->hpa.end = hpa + 0xfff; ··· 839 840 if (dev->num_addrs) { 841 int k; 842 - printk(", additional addresses: "); 843 for (k = 0; k < dev->num_addrs; k++) 844 printk("0x%lx ", dev->addr[k]); 845 }
··· 39 #include <asm/parisc-device.h> 40 41 /* See comments in include/asm-parisc/pci.h */ 42 + struct hppa_dma_ops *hppa_dma_ops __read_mostly; 43 EXPORT_SYMBOL(hppa_dma_ops); 44 45 static struct device root = { ··· 515 (iodc_data[5] << 8) | iodc_data[6]; 516 dev->hpa.name = parisc_pathname(dev); 517 dev->hpa.start = hpa; 518 + /* This is awkward. The STI spec says that gfx devices may occupy 519 + * 32MB or 64MB. Unfortunately, we don't know how to tell whether 520 + * it's the former or the latter. Assumptions either way can hurt us. 521 + */ 522 + if (hpa == 0xf4000000 || hpa == 0xf8000000) { 523 + dev->hpa.end = hpa + 0x03ffffff; 524 + } else if (hpa == 0xf6000000 || hpa == 0xfa000000) { 525 dev->hpa.end = hpa + 0x01ffffff; 526 } else { 527 dev->hpa.end = hpa + 0xfff; ··· 834 835 if (dev->num_addrs) { 836 int k; 837 + printk(", additional addresses: "); 838 for (k = 0; k < dev->num_addrs; k++) 839 printk("0x%lx ", dev->addr[k]); 840 }
+1 -1
arch/parisc/kernel/firmware.c
··· 80 81 /* Firmware needs to be initially set to narrow to determine the 82 * actual firmware width. */ 83 - int parisc_narrow_firmware = 1; 84 #endif 85 86 /* On most currently-supported platforms, IODC I/O calls are 32-bit calls
··· 80 81 /* Firmware needs to be initially set to narrow to determine the 82 * actual firmware width. */ 83 + int parisc_narrow_firmware __read_mostly = 1; 84 #endif 85 86 /* On most currently-supported platforms, IODC I/O calls are 32-bit calls
+1
arch/parisc/kernel/hardware.c
··· 551 {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"}, 552 {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"}, 553 {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"}, 554 {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"}, 555 {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"}, 556 {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
··· 551 {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"}, 552 {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"}, 553 {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"}, 554 + {HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"}, 555 {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"}, 556 {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"}, 557 {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
+3 -3
arch/parisc/kernel/inventory.c
··· 38 */ 39 #undef DEBUG_PAT 40 41 - int pdc_type = PDC_TYPE_ILLEGAL; 42 43 void __init setup_pdc(void) 44 { ··· 120 * pdc info is bad in this case). 121 */ 122 123 - if ( ((start & (PAGE_SIZE - 1)) != 0) 124 - || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) { 125 126 panic("Memory range doesn't align with page size!\n"); 127 }
··· 38 */ 39 #undef DEBUG_PAT 40 41 + int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; 42 43 void __init setup_pdc(void) 44 { ··· 120 * pdc info is bad in this case). 121 */ 122 123 + if (unlikely( ((start & (PAGE_SIZE - 1)) != 0) 124 + || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) { 125 126 panic("Memory range doesn't align with page size!\n"); 127 }
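Reviewer note: the unlikely() wrapper added around the alignment check above is a branch-prediction hint, not a behaviour change. A userspace-compilable sketch of the same hints; the kernel's versions wrap __builtin_expect() in the same way:

    #include <stdio.h>

    /* Same idea as the kernel's likely()/unlikely() macros. */
    #define likely(x)    __builtin_expect(!!(x), 1)
    #define unlikely(x)  __builtin_expect(!!(x), 0)

    #define PAGE_SIZE 4096UL

    static int range_ok(unsigned long start)
    {
            if (unlikely(start & (PAGE_SIZE - 1)))  /* cold error path */
                    return 0;                       /* misaligned */
            return 1;
    }

    int main(void)
    {
            printf("%d %d\n", range_ok(0x2000), range_ok(0x2004));
            return 0;
    }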
+3 -3
arch/parisc/kernel/pci-dma.c
··· 33 #include <asm/uaccess.h> 34 #include <asm/tlbflush.h> /* for purge_tlb_*() macros */ 35 36 - static struct proc_dir_entry * proc_gsc_root = NULL; 37 static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length); 38 - static unsigned long pcxl_used_bytes = 0; 39 - static unsigned long pcxl_used_pages = 0; 40 41 extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ 42 static spinlock_t pcxl_res_lock;
··· 33 #include <asm/uaccess.h> 34 #include <asm/tlbflush.h> /* for purge_tlb_*() macros */ 35 36 + static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; 37 static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length); 38 + static unsigned long pcxl_used_bytes __read_mostly = 0; 39 + static unsigned long pcxl_used_pages __read_mostly = 0; 40 41 extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ 42 static spinlock_t pcxl_res_lock;
+7 -6
arch/parisc/kernel/pdc_chassis.c
··· 30 #include <linux/kernel.h> 31 #include <linux/reboot.h> 32 #include <linux/notifier.h> 33 34 #include <asm/pdc_chassis.h> 35 #include <asm/processor.h> ··· 39 40 41 #ifdef CONFIG_PDC_CHASSIS 42 - static int pdc_chassis_old = 0; 43 - static unsigned int pdc_chassis_enabled = 1; 44 45 46 /** ··· 133 { 134 #ifdef CONFIG_PDC_CHASSIS 135 int handle = 0; 136 - if (pdc_chassis_enabled) { 137 DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__); 138 139 /* Let see if we have something to handle... */ ··· 143 printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n"); 144 handle = 1; 145 } 146 - else if (pdc_chassis_old) { 147 printk(KERN_INFO "Enabling old style chassis LED panel support.\n"); 148 handle = 1; 149 } ··· 179 /* Maybe we should do that in an other way ? */ 180 int retval = 0; 181 #ifdef CONFIG_PDC_CHASSIS 182 - if (pdc_chassis_enabled) { 183 184 DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message); 185 ··· 215 } 216 } else retval = -1; 217 #else 218 - if (pdc_chassis_old) { 219 switch (message) { 220 case PDC_CHASSIS_DIRECT_BSTART: 221 case PDC_CHASSIS_DIRECT_BCOMPLETE:
··· 30 #include <linux/kernel.h> 31 #include <linux/reboot.h> 32 #include <linux/notifier.h> 33 + #include <linux/cache.h> 34 35 #include <asm/pdc_chassis.h> 36 #include <asm/processor.h> ··· 38 39 40 #ifdef CONFIG_PDC_CHASSIS 41 + static int pdc_chassis_old __read_mostly = 0; 42 + static unsigned int pdc_chassis_enabled __read_mostly = 1; 43 44 45 /** ··· 132 { 133 #ifdef CONFIG_PDC_CHASSIS 134 int handle = 0; 135 + if (likely(pdc_chassis_enabled)) { 136 DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__); 137 138 /* Let see if we have something to handle... */ ··· 142 printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n"); 143 handle = 1; 144 } 145 + else if (unlikely(pdc_chassis_old)) { 146 printk(KERN_INFO "Enabling old style chassis LED panel support.\n"); 147 handle = 1; 148 } ··· 178 /* Maybe we should do that in an other way ? */ 179 int retval = 0; 180 #ifdef CONFIG_PDC_CHASSIS 181 + if (likely(pdc_chassis_enabled)) { 182 183 DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message); 184 ··· 214 } 215 } else retval = -1; 216 #else 217 + if (unlikely(pdc_chassis_old)) { 218 switch (message) { 219 case PDC_CHASSIS_DIRECT_BSTART: 220 case PDC_CHASSIS_DIRECT_BCOMPLETE:
+3 -3
arch/parisc/kernel/perf.c
··· 66 uint8_t write_control; 67 }; 68 69 - static int perf_processor_interface = UNKNOWN_INTF; 70 - static int perf_enabled = 0; 71 static spinlock_t perf_lock; 72 - struct parisc_device *cpu_device = NULL; 73 74 /* RDRs to write for PCX-W */ 75 static int perf_rdrs_W[] =
··· 66 uint8_t write_control; 67 }; 68 69 + static int perf_processor_interface __read_mostly = UNKNOWN_INTF; 70 + static int perf_enabled __read_mostly = 0; 71 static spinlock_t perf_lock; 72 + struct parisc_device *cpu_device __read_mostly = NULL; 73 74 /* RDRs to write for PCX-W */ 75 static int perf_rdrs_W[] =
+1 -1
arch/parisc/kernel/process.c
··· 54 #include <asm/uaccess.h> 55 #include <asm/unwind.h> 56 57 - static int hlt_counter; 58 59 /* 60 * Power off function, if any
··· 54 #include <asm/uaccess.h> 55 #include <asm/unwind.h> 56 57 + static int hlt_counter __read_mostly; 58 59 /* 60 * Power off function, if any
+4 -4
arch/parisc/kernel/processor.c
··· 44 #include <asm/irq.h> /* for struct irq_region */ 45 #include <asm/parisc-device.h> 46 47 - struct system_cpuinfo_parisc boot_cpu_data; 48 EXPORT_SYMBOL(boot_cpu_data); 49 50 - struct cpuinfo_parisc cpu_data[NR_CPUS]; 51 52 /* 53 ** PARISC CPU driver - claim "device" and initialize CPU data structures. ··· 378 return 0; 379 } 380 381 - static struct parisc_device_id processor_tbl[] = { 382 { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID }, 383 { 0, } 384 }; 385 386 - static struct parisc_driver cpu_driver = { 387 .name = "CPU", 388 .id_table = processor_tbl, 389 .probe = processor_probe
··· 44 #include <asm/irq.h> /* for struct irq_region */ 45 #include <asm/parisc-device.h> 46 47 + struct system_cpuinfo_parisc boot_cpu_data __read_mostly; 48 EXPORT_SYMBOL(boot_cpu_data); 49 50 + struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly; 51 52 /* 53 ** PARISC CPU driver - claim "device" and initialize CPU data structures. ··· 378 return 0; 379 } 380 381 + static struct parisc_device_id processor_tbl[] __read_mostly = { 382 { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID }, 383 { 0, } 384 }; 385 386 + static struct parisc_driver cpu_driver __read_mostly = { 387 .name = "CPU", 388 .id_table = processor_tbl, 389 .probe = processor_probe
+5 -5
arch/parisc/kernel/setup.c
··· 46 #include <asm/io.h> 47 #include <asm/setup.h> 48 49 - char command_line[COMMAND_LINE_SIZE]; 50 51 /* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ 52 - struct proc_dir_entry * proc_runway_root = NULL; 53 - struct proc_dir_entry * proc_gsc_root = NULL; 54 - struct proc_dir_entry * proc_mckinley_root = NULL; 55 56 #if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) 57 - int parisc_bus_is_phys = 1; /* Assume no IOMMU is present */ 58 EXPORT_SYMBOL(parisc_bus_is_phys); 59 #endif 60
··· 46 #include <asm/io.h> 47 #include <asm/setup.h> 48 49 + char command_line[COMMAND_LINE_SIZE] __read_mostly; 50 51 /* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ 52 + struct proc_dir_entry * proc_runway_root __read_mostly = NULL; 53 + struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; 54 + struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; 55 56 #if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) 57 + int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ 58 EXPORT_SYMBOL(parisc_bus_is_phys); 59 #endif 60
+7 -9
arch/parisc/kernel/smp.c
··· 39 #include <asm/atomic.h> 40 #include <asm/current.h> 41 #include <asm/delay.h> 42 - #include <asm/pgalloc.h> /* for flush_tlb_all() proto/macro */ 43 44 #include <asm/io.h> 45 #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ ··· 58 59 volatile struct task_struct *smp_init_current_idle_task; 60 61 - static volatile int cpu_now_booting = 0; /* track which CPU is booting */ 62 63 - static int parisc_max_cpus = 1; 64 65 /* online cpus are ones that we've managed to bring up completely 66 * possible cpus are all valid cpu ··· 71 * empty in the beginning. 72 */ 73 74 - cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */ 75 - cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of Present CPUs */ 76 77 EXPORT_SYMBOL(cpu_online_map); 78 EXPORT_SYMBOL(cpu_possible_map); ··· 406 * as we want to ensure all TLB's flushed before proceeding. 407 */ 408 409 - extern void flush_tlb_all_local(void); 410 - 411 void 412 smp_flush_tlb_all(void) 413 { 414 - on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1); 415 } 416 417 ··· 485 #endif 486 487 flush_cache_all_local(); /* start with known state */ 488 - flush_tlb_all_local(); 489 490 local_irq_enable(); /* Interrupts have been off until now */ 491
··· 39 #include <asm/atomic.h> 40 #include <asm/current.h> 41 #include <asm/delay.h> 42 + #include <asm/tlbflush.h> 43 44 #include <asm/io.h> 45 #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ ··· 58 59 volatile struct task_struct *smp_init_current_idle_task; 60 61 + static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */ 62 63 + static int parisc_max_cpus __read_mostly = 1; 64 65 /* online cpus are ones that we've managed to bring up completely 66 * possible cpus are all valid cpu ··· 71 * empty in the beginning. 72 */ 73 74 + cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */ 75 + cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */ 76 77 EXPORT_SYMBOL(cpu_online_map); 78 EXPORT_SYMBOL(cpu_possible_map); ··· 406 * as we want to ensure all TLB's flushed before proceeding. 407 */ 408 409 void 410 smp_flush_tlb_all(void) 411 { 412 + on_each_cpu(flush_tlb_all_local, NULL, 1, 1); 413 } 414 415 ··· 487 #endif 488 489 flush_cache_all_local(); /* start with known state */ 490 + flush_tlb_all_local(NULL); 491 492 local_irq_enable(); /* Interrupts have been off until now */ 493
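Reviewer note: the casts disappear from the on_each_cpu() calls above because the callback is expected to take a void * argument, which is also why the direct call sites now pass an explicit NULL. A hedged sketch of that calling convention with the 4-argument signature used in this kernel generation; my_per_cpu_flush and flush_everywhere are placeholders, not kernel symbols:

    /* Callback signature on_each_cpu() expects. */
    static void my_per_cpu_flush(void *unused)
    {
            /* per-CPU work goes here */
    }

    static void flush_everywhere(void)
    {
            /* func, info, retry, wait: runs the callback on every online
             * CPU and, with wait=1, returns only after all of them finish. */
            on_each_cpu(my_per_cpu_flush, NULL, 1, 1);
    }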
+2 -2
arch/parisc/kernel/time.c
··· 36 /* xtime and wall_jiffies keep wall-clock time */ 37 extern unsigned long wall_jiffies; 38 39 - static long clocktick; /* timer cycles per tick */ 40 - static long halftick; 41 42 #ifdef CONFIG_SMP 43 extern void smp_do_timer(struct pt_regs *regs);
··· 36 /* xtime and wall_jiffies keep wall-clock time */ 37 extern unsigned long wall_jiffies; 38 39 + static long clocktick __read_mostly; /* timer cycles per tick */ 40 + static long halftick __read_mostly; 41 42 #ifdef CONFIG_SMP 43 extern void smp_do_timer(struct pt_regs *regs);
+2 -1
arch/parisc/kernel/topology.c
··· 20 #include <linux/init.h> 21 #include <linux/smp.h> 22 #include <linux/cpu.h> 23 24 - static struct cpu cpu_devices[NR_CPUS]; 25 26 static int __init topology_init(void) 27 {
··· 20 #include <linux/init.h> 21 #include <linux/smp.h> 22 #include <linux/cpu.h> 23 + #include <linux/cache.h> 24 25 + static struct cpu cpu_devices[NR_CPUS] __read_mostly; 26 27 static int __init topology_init(void) 28 {
+1 -1
arch/parisc/kernel/unaligned.c
··· 122 #define ERR_NOTHANDLED -1 123 #define ERR_PAGEFAULT -2 124 125 - int unaligned_enabled = 1; 126 127 void die_if_kernel (char *str, struct pt_regs *regs, long err); 128
··· 122 #define ERR_NOTHANDLED -1 123 #define ERR_PAGEFAULT -2 124 125 + int unaligned_enabled __read_mostly = 1; 126 127 void die_if_kernel (char *str, struct pt_regs *regs, long err); 128
+1 -1
arch/parisc/kernel/unwind.c
··· 35 * we can call unwind_init as early in the bootup process as 36 * possible (before the slab allocator is initialized) 37 */ 38 - static struct unwind_table kernel_unwind_table; 39 static LIST_HEAD(unwind_tables); 40 41 static inline const struct unwind_table_entry *
··· 35 * we can call unwind_init as early in the bootup process as 36 * possible (before the slab allocator is initialized) 37 */ 38 + static struct unwind_table kernel_unwind_table __read_mostly; 39 static LIST_HEAD(unwind_tables); 40 41 static inline const struct unwind_table_entry *
+6 -9
arch/parisc/kernel/vmlinux.lds.S
··· 68 RODATA 69 70 /* writeable */ 71 - . = ALIGN(4096); /* Make sure this is paged aligned so 72 that we can properly leave these 73 as writable */ 74 data_start = .; ··· 104 /* PA-RISC locks requires 16-byte alignment */ 105 . = ALIGN(16); 106 .data.lock_aligned : { *(.data.lock_aligned) } 107 108 _edata = .; /* End of data section */ 109 ··· 198 #endif 199 } 200 201 - /* Stabs debugging sections. */ 202 - .stab 0 : { *(.stab) } 203 - .stabstr 0 : { *(.stabstr) } 204 - .stab.excl 0 : { *(.stab.excl) } 205 - .stab.exclstr 0 : { *(.stab.exclstr) } 206 - .stab.index 0 : { *(.stab.index) } 207 - .stab.indexstr 0 : { *(.stab.indexstr) } 208 - .comment 0 : { *(.comment) } 209 .note 0 : { *(.note) } 210 211 }
··· 68 RODATA 69 70 /* writeable */ 71 + . = ALIGN(4096); /* Make sure this is page aligned so 72 that we can properly leave these 73 as writable */ 74 data_start = .; ··· 104 /* PA-RISC locks requires 16-byte alignment */ 105 . = ALIGN(16); 106 .data.lock_aligned : { *(.data.lock_aligned) } 107 + 108 + /* rarely changed data like cpu maps */ 109 + . = ALIGN(16); 110 + .data.read_mostly : { *(.data.read_mostly) } 111 112 _edata = .; /* End of data section */ 113 ··· 194 #endif 195 } 196 197 + STABS_DEBUG 198 .note 0 : { *(.note) } 199 200 }
+18 -11
arch/parisc/mm/init.c
··· 36 extern char __init_begin, __init_end; 37 38 #ifdef CONFIG_DISCONTIGMEM 39 - struct node_map_data node_data[MAX_NUMNODES]; 40 - bootmem_data_t bmem_data[MAX_NUMNODES]; 41 - unsigned char pfnnid_map[PFNNID_MAP_MAX]; 42 #endif 43 44 static struct resource data_resource = { ··· 58 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 59 }; 60 61 - static struct resource sysram_resources[MAX_PHYSMEM_RANGES]; 62 63 /* The following array is initialized from the firmware specific 64 * information retrieved in kernel/inventory.c. 65 */ 66 67 - physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES]; 68 - int npmem_ranges; 69 70 #ifdef __LP64__ 71 #define MAX_MEM (~0UL) ··· 73 #define MAX_MEM (3584U*1024U*1024U) 74 #endif /* !__LP64__ */ 75 76 - static unsigned long mem_limit = MAX_MEM; 77 78 static void __init mem_limit_func(void) 79 { ··· 300 max_pfn = start_pfn + npages; 301 } 302 303 if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) { 304 printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n"); 305 BUG(); ··· 438 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ 439 & ~(VM_MAP_OFFSET-1))) 440 441 - void *vmalloc_start; 442 EXPORT_SYMBOL(vmalloc_start); 443 444 #ifdef CONFIG_PA11 445 - unsigned long pcxl_dma_start; 446 #endif 447 448 void __init mem_init(void) ··· 482 return 0; 483 } 484 485 - unsigned long *empty_zero_page; 486 487 void show_mem(void) 488 { ··· 1005 void flush_tlb_all(void) 1006 { 1007 spin_lock(&sid_lock); 1008 - flush_tlb_all_local(); 1009 recycle_sids(); 1010 spin_unlock(&sid_lock); 1011 }
··· 36 extern char __init_begin, __init_end; 37 38 #ifdef CONFIG_DISCONTIGMEM 39 + struct node_map_data node_data[MAX_NUMNODES] __read_mostly; 40 + bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly; 41 + unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; 42 #endif 43 44 static struct resource data_resource = { ··· 58 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 59 }; 60 61 + static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; 62 63 /* The following array is initialized from the firmware specific 64 * information retrieved in kernel/inventory.c. 65 */ 66 67 + physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; 68 + int npmem_ranges __read_mostly; 69 70 #ifdef __LP64__ 71 #define MAX_MEM (~0UL) ··· 73 #define MAX_MEM (3584U*1024U*1024U) 74 #endif /* !__LP64__ */ 75 76 + static unsigned long mem_limit __read_mostly = MAX_MEM; 77 78 static void __init mem_limit_func(void) 79 { ··· 300 max_pfn = start_pfn + npages; 301 } 302 303 + /* IOMMU is always used to access "high mem" on those boxes 304 + * that can support enough mem that a PCI device couldn't 305 + * directly DMA to any physical addresses. 306 + * ISA DMA support will need to revisit this. 307 + */ 308 + max_low_pfn = max_pfn; 309 + 310 if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) { 311 printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n"); 312 BUG(); ··· 431 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ 432 & ~(VM_MAP_OFFSET-1))) 433 434 + void *vmalloc_start __read_mostly; 435 EXPORT_SYMBOL(vmalloc_start); 436 437 #ifdef CONFIG_PA11 438 + unsigned long pcxl_dma_start __read_mostly; 439 #endif 440 441 void __init mem_init(void) ··· 475 return 0; 476 } 477 478 + unsigned long *empty_zero_page __read_mostly; 479 480 void show_mem(void) 481 { ··· 998 void flush_tlb_all(void) 999 { 1000 spin_lock(&sid_lock); 1001 + flush_tlb_all_local(NULL); 1002 recycle_sids(); 1003 spin_unlock(&sid_lock); 1004 }
+63 -37
arch/parisc/mm/ioremap.c
··· 1 /* 2 * arch/parisc/mm/ioremap.c 3 * 4 - * Re-map IO memory to kernel address space so that we can access it. 5 - * This is needed for high PCI addresses that aren't mapped in the 6 - * 640k-1MB IO memory area on PC's 7 - * 8 * (C) Copyright 1995 1996 Linus Torvalds 9 * (C) Copyright 2001 Helge Deller <deller@gmx.de> 10 */ 11 12 #include <linux/vmalloc.h> ··· 11 #include <linux/module.h> 12 #include <asm/io.h> 13 #include <asm/pgalloc.h> 14 15 - static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, 16 - unsigned long phys_addr, unsigned long flags) 17 { 18 - unsigned long end; 19 20 address &= ~PMD_MASK; 21 end = address + size; 22 if (end > PMD_SIZE) 23 end = PMD_SIZE; 24 - if (address >= end) 25 - BUG(); 26 do { 27 - if (!pte_none(*pte)) { 28 - printk(KERN_ERR "remap_area_pte: page already exists\n"); 29 - BUG(); 30 - } 31 - set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW | 32 - _PAGE_DIRTY | _PAGE_ACCESSED | flags))); 33 address += PAGE_SIZE; 34 - phys_addr += PAGE_SIZE; 35 pte++; 36 } while (address && (address < end)); 37 } 38 39 - static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, 40 - unsigned long phys_addr, unsigned long flags) 41 { 42 unsigned long end; 43 44 address &= ~PGDIR_MASK; 45 end = address + size; 46 if (end > PGDIR_SIZE) 47 end = PGDIR_SIZE; 48 phys_addr -= address; 49 - if (address >= end) 50 - BUG(); 51 do { 52 - pte_t * pte = pte_alloc_kernel(pmd, address); 53 if (!pte) 54 return -ENOMEM; 55 - remap_area_pte(pte, address, end - address, address + phys_addr, flags); 56 address = (address + PMD_SIZE) & PMD_MASK; 57 pmd++; 58 } while (address && (address < end)); 59 return 0; 60 } 61 62 - #if (USE_HPPA_IOREMAP) 63 - static int remap_area_pages(unsigned long address, unsigned long phys_addr, 64 - unsigned long size, unsigned long flags) 65 { 66 - int error; 67 - pgd_t * dir; 68 unsigned long end = address + size; 69 70 phys_addr -= address; 71 - dir = pgd_offset(&init_mm, address); 72 flush_cache_all(); 73 - if (address >= end) 74 - BUG(); 75 do { 76 pmd_t *pmd; 77 - pmd = pmd_alloc(&init_mm, dir, address); 78 error = -ENOMEM; 79 if (!pmd) 80 break; 81 if (remap_area_pmd(pmd, address, end - address, 82 - phys_addr + address, flags)) 83 break; 84 error = 0; 85 address = (address + PGDIR_SIZE) & PGDIR_MASK; 86 dir++; 87 } while (address && (address < end)); 88 flush_tlb_all(); 89 return error; 90 } 91 #endif /* USE_HPPA_IOREMAP */ ··· 146 147 /* 148 * Remap an arbitrary physical address space into the kernel virtual 149 - * address space. Needed when the kernel wants to access high addresses 150 - * directly. 151 * 152 * NOTE! We need to allow non-page-aligned mappings too: we will obviously 153 * have to convert them into an offset in a page-aligned mapping, but the ··· 170 #endif 171 172 #else 173 - void * addr; 174 - struct vm_struct * area; 175 unsigned long offset, last_addr; 176 177 /* Don't allow wraparound or zero size */ ··· 189 t_addr = __va(phys_addr); 190 t_end = t_addr + (size - 1); 191 192 - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) 193 if(!PageReserved(page)) 194 return NULL; 195 } 196 197 /* ··· 209 area = get_vm_area(size, VM_IOREMAP); 210 if (!area) 211 return NULL; 212 addr = area->addr; 213 if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { 214 vfree(addr); 215 return NULL; 216 } 217 return (void __iomem *) (offset + (char *)addr); 218 #endif 219 }
··· 1 /* 2 * arch/parisc/mm/ioremap.c 3 * 4 * (C) Copyright 1995 1996 Linus Torvalds 5 * (C) Copyright 2001 Helge Deller <deller@gmx.de> 6 + * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org> 7 */ 8 9 #include <linux/vmalloc.h> ··· 14 #include <linux/module.h> 15 #include <asm/io.h> 16 #include <asm/pgalloc.h> 17 + #include <asm/tlbflush.h> 18 + #include <asm/cacheflush.h> 19 20 + static inline void 21 + remap_area_pte(pte_t *pte, unsigned long address, unsigned long size, 22 + unsigned long phys_addr, unsigned long flags) 23 { 24 + unsigned long end, pfn; 25 + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | 26 + _PAGE_ACCESSED | flags); 27 28 address &= ~PMD_MASK; 29 + 30 end = address + size; 31 if (end > PMD_SIZE) 32 end = PMD_SIZE; 33 + 34 + BUG_ON(address >= end); 35 + 36 + pfn = phys_addr >> PAGE_SHIFT; 37 do { 38 + BUG_ON(!pte_none(*pte)); 39 + 40 + set_pte(pte, pfn_pte(pfn, pgprot)); 41 + 42 address += PAGE_SIZE; 43 + pfn++; 44 pte++; 45 } while (address && (address < end)); 46 } 47 48 + static inline int 49 + remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size, 50 + unsigned long phys_addr, unsigned long flags) 51 { 52 unsigned long end; 53 54 address &= ~PGDIR_MASK; 55 + 56 end = address + size; 57 if (end > PGDIR_SIZE) 58 end = PGDIR_SIZE; 59 + 60 + BUG_ON(address >= end); 61 + 62 phys_addr -= address; 63 do { 64 + pte_t *pte = pte_alloc_kernel(pmd, address); 65 if (!pte) 66 return -ENOMEM; 67 + 68 + remap_area_pte(pte, address, end - address, 69 + address + phys_addr, flags); 70 + 71 address = (address + PMD_SIZE) & PMD_MASK; 72 pmd++; 73 } while (address && (address < end)); 74 + 75 return 0; 76 } 77 78 + #if USE_HPPA_IOREMAP 79 + static int 80 + remap_area_pages(unsigned long address, unsigned long phys_addr, 81 + unsigned long size, unsigned long flags) 82 { 83 + pgd_t *dir; 84 + int error = 0; 85 unsigned long end = address + size; 86 87 + BUG_ON(address >= end); 88 + 89 phys_addr -= address; 90 + dir = pgd_offset_k(address); 91 + 92 flush_cache_all(); 93 + 94 do { 95 + pud_t *pud; 96 pmd_t *pmd; 97 + 98 error = -ENOMEM; 99 + pud = pud_alloc(&init_mm, dir, address); 100 + if (!pud) 101 + break; 102 + 103 + pmd = pmd_alloc(&init_mm, pud, address); 104 if (!pmd) 105 break; 106 + 107 if (remap_area_pmd(pmd, address, end - address, 108 + phys_addr + address, flags)) 109 break; 110 + 111 error = 0; 112 address = (address + PGDIR_SIZE) & PGDIR_MASK; 113 dir++; 114 } while (address && (address < end)); 115 + 116 flush_tlb_all(); 117 + 118 return error; 119 } 120 #endif /* USE_HPPA_IOREMAP */ ··· 123 124 /* 125 * Remap an arbitrary physical address space into the kernel virtual 126 + * address space. 127 * 128 * NOTE! We need to allow non-page-aligned mappings too: we will obviously 129 * have to convert them into an offset in a page-aligned mapping, but the ··· 148 #endif 149 150 #else 151 + void *addr; 152 + struct vm_struct *area; 153 unsigned long offset, last_addr; 154 155 /* Don't allow wraparound or zero size */ ··· 167 t_addr = __va(phys_addr); 168 t_end = t_addr + (size - 1); 169 170 + for (page = virt_to_page(t_addr); 171 + page <= virt_to_page(t_end); page++) { 172 if(!PageReserved(page)) 173 return NULL; 174 + } 175 } 176 177 /* ··· 185 area = get_vm_area(size, VM_IOREMAP); 186 if (!area) 187 return NULL; 188 + 189 addr = area->addr; 190 if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { 191 vfree(addr); 192 return NULL; 193 } 194 + 195 return (void __iomem *) (offset + (char *)addr); 196 #endif 197 }
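Reviewer note: the rebuilt page-table walk above is what ultimately backs the ordinary driver-side ioremap()/iounmap() pattern. A hedged consumer sketch; the physical base, length and register offset are invented placeholders, not a real device:

    #include <linux/types.h>
    #include <linux/ioport.h>
    #include <linux/errno.h>
    #include <asm/io.h>

    #define MY_DEV_PHYS   0xf4001000UL   /* hypothetical MMIO base */
    #define MY_DEV_LEN    0x1000UL
    #define MY_REG        0x10

    static int my_probe_sketch(void)
    {
            void __iomem *base;
            u32 val;

            if (!request_mem_region(MY_DEV_PHYS, MY_DEV_LEN, "mydev"))
                    return -EBUSY;

            base = ioremap(MY_DEV_PHYS, MY_DEV_LEN);
            if (!base) {
                    release_mem_region(MY_DEV_PHYS, MY_DEV_LEN);
                    return -ENOMEM;
            }

            val = readl(base + MY_REG);     /* goes through the mapping built above */
            writel(val | 1, base + MY_REG);

            iounmap(base);
            release_mem_region(MY_DEV_PHYS, MY_DEV_LEN);
            return 0;
    }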
+1 -1
drivers/net/gianfar_sysfs.c
··· 7 * Based on 8260_io/fcc_enet.c 8 * 9 * Author: Andy Fleming 10 - * Maintainer: Kumar Gala (kumar.gala@freescale.com) 11 * 12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc. 13 *
··· 7 * Based on 8260_io/fcc_enet.c 8 * 9 * Author: Andy Fleming 10 + * Maintainer: Kumar Gala (galak@kernel.crashing.org) 11 * 12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc. 13 *
+23 -7
drivers/parisc/dino.c
··· 83 ** bus number for each dino. 84 */ 85 86 - #define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA) 87 88 #define DINO_IAR0 0x004 89 #define DINO_IODC_ADDR 0x008 ··· 125 126 #define DINO_IRQS 11 /* bits 0-10 are architected */ 127 #define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ 128 129 #define DINO_MASK_IRQ(x) (1<<(x)) 130 ··· 148 unsigned long txn_addr; /* EIR addr to generate interrupt */ 149 u32 txn_data; /* EIR data assign to each dino */ 150 u32 imr; /* IRQ's which are enabled */ 151 - int global_irq[12]; /* map IMR bit to global irq */ 152 #ifdef DINO_DEBUG 153 unsigned int dino_irr0; /* save most recent IRQ line stat */ 154 #endif ··· 299 static void dino_disable_irq(unsigned int irq) 300 { 301 struct dino_device *dino_dev = irq_desc[irq].handler_data; 302 - int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq); 303 304 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); 305 ··· 311 static void dino_enable_irq(unsigned int irq) 312 { 313 struct dino_device *dino_dev = irq_desc[irq].handler_data; 314 - int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq); 315 u32 tmp; 316 317 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); ··· 436 437 dino_assign_irq(dino, irq, &dev->irq); 438 } 439 440 static void __init 441 dino_bios_init(void) ··· 683 printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev)); 684 #endif 685 } else { 686 - 687 /* Adjust INT_LINE for that busses region */ 688 dino_assign_irq(dino_dev, dev->irq, &dev->irq); 689 } ··· 888 889 /* allocate I/O Port resource region */ 890 res = &dino_dev->hba.io_space; 891 - if (dev->id.hversion == 0x680 || is_card_dino(&dev->id)) { 892 res->name = "Dino I/O Port"; 893 } else { 894 res->name = "Cujo I/O Port"; ··· 943 if (is_card_dino(&dev->id)) { 944 version = "3.x (card mode)"; 945 } else { 946 - if(dev->id.hversion == 0x680) { 947 if (dev->id.hversion_rev < 4) { 948 version = dino_vers[dev->id.hversion_rev]; 949 }
··· 83 ** bus number for each dino. 84 */ 85 86 + #define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA) 87 + #define is_cujo(id) ((id)->hversion == 0x682) 88 89 #define DINO_IAR0 0x004 90 #define DINO_IODC_ADDR 0x008 ··· 124 125 #define DINO_IRQS 11 /* bits 0-10 are architected */ 126 #define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ 127 + #define DINO_LOCAL_IRQS (DINO_IRQS+1) 128 129 #define DINO_MASK_IRQ(x) (1<<(x)) 130 ··· 146 unsigned long txn_addr; /* EIR addr to generate interrupt */ 147 u32 txn_data; /* EIR data assign to each dino */ 148 u32 imr; /* IRQ's which are enabled */ 149 + int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ 150 #ifdef DINO_DEBUG 151 unsigned int dino_irr0; /* save most recent IRQ line stat */ 152 #endif ··· 297 static void dino_disable_irq(unsigned int irq) 298 { 299 struct dino_device *dino_dev = irq_desc[irq].handler_data; 300 + int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 301 302 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); 303 ··· 309 static void dino_enable_irq(unsigned int irq) 310 { 311 struct dino_device *dino_dev = irq_desc[irq].handler_data; 312 + int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 313 u32 tmp; 314 315 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); ··· 434 435 dino_assign_irq(dino, irq, &dev->irq); 436 } 437 + 438 + 439 + /* 440 + * Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de) 441 + * (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...) 442 + */ 443 + static void __devinit quirk_cirrus_cardbus(struct pci_dev *dev) 444 + { 445 + u8 new_irq = dev->irq - 1; 446 + printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n", 447 + pci_name(dev), dev->irq, new_irq); 448 + dev->irq = new_irq; 449 + } 450 + DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); 451 + 452 453 static void __init 454 dino_bios_init(void) ··· 666 printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev)); 667 #endif 668 } else { 669 /* Adjust INT_LINE for that busses region */ 670 dino_assign_irq(dino_dev, dev->irq, &dev->irq); 671 } ··· 872 873 /* allocate I/O Port resource region */ 874 res = &dino_dev->hba.io_space; 875 + if (!is_cujo(&dev->id)) { 876 res->name = "Dino I/O Port"; 877 } else { 878 res->name = "Cujo I/O Port"; ··· 927 if (is_card_dino(&dev->id)) { 928 version = "3.x (card mode)"; 929 } else { 930 + if (!is_cujo(&dev->id)) { 931 if (dev->id.hversion_rev < 4) { 932 version = dino_vers[dev->id.hversion_rev]; 933 }
+2 -2
drivers/parisc/eisa.c
··· 57 58 static DEFINE_SPINLOCK(eisa_irq_lock); 59 60 - void __iomem *eisa_eeprom_addr; 61 62 /* We can only have one EISA adapter in the system because neither 63 * implementation can be flexed. ··· 141 * in the furure. 142 */ 143 /* irq 13,8,2,1,0 must be edge */ 144 - static unsigned int eisa_irq_level; /* default to edge triggered */ 145 146 147 /* called by free irq */
··· 57 58 static DEFINE_SPINLOCK(eisa_irq_lock); 59 60 + void __iomem *eisa_eeprom_addr __read_mostly; 61 62 /* We can only have one EISA adapter in the system because neither 63 * implementation can be flexed. ··· 141 * in the furure. 142 */ 143 /* irq 13,8,2,1,0 must be edge */ 144 + static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */ 145 146 147 /* called by free irq */
+1 -1
drivers/parisc/eisa_eeprom.c
··· 48 } 49 50 static ssize_t eisa_eeprom_read(struct file * file, 51 - char *buf, size_t count, loff_t *ppos ) 52 { 53 unsigned char *tmp; 54 ssize_t ret;
··· 48 } 49 50 static ssize_t eisa_eeprom_read(struct file * file, 51 + char __user *buf, size_t count, loff_t *ppos ) 52 { 53 unsigned char *tmp; 54 ssize_t ret;
+1 -1
drivers/parisc/lasi.c
··· 150 * 151 */ 152 153 - static unsigned long lasi_power_off_hpa; 154 155 static void lasi_power_off(void) 156 {
··· 150 * 151 */ 152 153 + static unsigned long lasi_power_off_hpa __read_mostly; 154 155 static void lasi_power_off(void) 156 {
+91 -24
drivers/parisc/lba_pci.c
··· 167 168 /* non-postable I/O port space, densely packed */ 169 #define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL) 170 - static void __iomem *astro_iop_base; 171 172 #define ELROY_HVERS 0x782 173 #define MERCURY_HVERS 0x783 ··· 695 } 696 } 697 } 698 - #else 699 - #define lba_claim_dev_resources(dev) 700 - #endif 701 702 703 /* 704 ** The algorithm is generic code. ··· 807 lba_dump_res(&ioport_resource, 2); 808 BUG(); 809 } 810 811 if (ldev->hba.elmmio_space.start) { 812 err = request_resource(&iomem_resource, ··· 823 824 /* lba_dump_res(&iomem_resource, 2); */ 825 /* BUG(); */ 826 - } 827 } 828 829 - err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space)); 830 - if (err < 0) { 831 - /* FIXME overlaps with elmmio will fail here. 832 - * Need to prune (or disable) the distributed range. 833 - * 834 - * BEWARE: conflicts with this lmmio range may be 835 - * elmmio range which is pointing down another rope. 836 - */ 837 838 - printk("FAILED: lba_fixup_bus() request for " 839 "lmmio_space [%lx/%lx]\n", 840 ldev->hba.lmmio_space.start, 841 ldev->hba.lmmio_space.end); 842 - /* lba_dump_res(&iomem_resource, 2); */ 843 } 844 845 #ifdef CONFIG_64BIT ··· 866 lba_dump_res(&iomem_resource, 2); 867 BUG(); 868 } 869 } 870 #endif 871 872 - /* advertize Host bridge resources to PCI bus */ 873 - bus->resource[0] = &(ldev->hba.io_space); 874 - bus->resource[1] = &(ldev->hba.lmmio_space); 875 - i=2; 876 - if (ldev->hba.elmmio_space.start) 877 - bus->resource[i++] = &(ldev->hba.elmmio_space); 878 - if (ldev->hba.gmmio_space.start) 879 - bus->resource[i++] = &(ldev->hba.gmmio_space); 880 - 881 } 882 883 list_for_each(ln, &bus->devices) {
··· 167 168 /* non-postable I/O port space, densely packed */ 169 #define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL) 170 + static void __iomem *astro_iop_base __read_mostly; 171 172 #define ELROY_HVERS 0x782 173 #define MERCURY_HVERS 0x783 ··· 695 } 696 } 697 } 698 699 + 700 + /* 701 + * truncate_pat_collision: Deal with overlaps or outright collisions 702 + * between PAT PDC reported ranges. 703 + * 704 + * Broken PA8800 firmware will report lmmio range that 705 + * overlaps with CPU HPA. Just truncate the lmmio range. 706 + * 707 + * BEWARE: conflicts with this lmmio range may be an 708 + * elmmio range which is pointing down another rope. 709 + * 710 + * FIXME: only deals with one collision per range...theoretically we 711 + * could have several. Supporting more than one collision will get messy. 712 + */ 713 + static unsigned long 714 + truncate_pat_collision(struct resource *root, struct resource *new) 715 + { 716 + unsigned long start = new->start; 717 + unsigned long end = new->end; 718 + struct resource *tmp = root->child; 719 + 720 + if (end <= start || start < root->start || !tmp) 721 + return 0; 722 + 723 + /* find first overlap */ 724 + while (tmp && tmp->end < start) 725 + tmp = tmp->sibling; 726 + 727 + /* no entries overlap */ 728 + if (!tmp) return 0; 729 + 730 + /* found one that starts behind the new one 731 + ** Don't need to do anything. 732 + */ 733 + if (tmp->start >= end) return 0; 734 + 735 + if (tmp->start <= start) { 736 + /* "front" of new one overlaps */ 737 + new->start = tmp->end + 1; 738 + 739 + if (tmp->end >= end) { 740 + /* AACCKK! totally overlaps! drop this range. */ 741 + return 1; 742 + } 743 + } 744 + 745 + if (tmp->end < end ) { 746 + /* "end" of new one overlaps */ 747 + new->end = tmp->start - 1; 748 + } 749 + 750 + printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] " 751 + "to [%lx,%lx]\n", 752 + start, end, 753 + new->start, new->end ); 754 + 755 + return 0; /* truncation successful */ 756 + } 757 + 758 + #else 759 + #define lba_claim_dev_resources(dev) do { } while (0) 760 + #define truncate_pat_collision(r,n) (0) 761 + #endif 762 763 /* 764 ** The algorithm is generic code. ··· 747 lba_dump_res(&ioport_resource, 2); 748 BUG(); 749 } 750 + /* advertize Host bridge resources to PCI bus */ 751 + bus->resource[0] = &(ldev->hba.io_space); 752 + i = 1; 753 754 if (ldev->hba.elmmio_space.start) { 755 err = request_resource(&iomem_resource, ··· 760 761 /* lba_dump_res(&iomem_resource, 2); */ 762 /* BUG(); */ 763 + } else 764 + bus->resource[i++] = &(ldev->hba.elmmio_space); 765 } 766 767 768 + /* Overlaps with elmmio can (and should) fail here. 769 + * We will prune (or ignore) the distributed range. 770 + * 771 + * FIXME: SBA code should register all elmmio ranges first. 772 + * that would take care of elmmio ranges routed 773 + * to a different rope (already discovered) from 774 + * getting registered *after* LBA code has already 775 + * registered it's distributed lmmio range. 776 + */ 777 + if (truncate_pat_collision(&iomem_resource, 778 + &(ldev->hba.lmmio_space))) { 779 + 780 + printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n", 781 + ldev->hba.lmmio_space.start, 782 + ldev->hba.lmmio_space.end); 783 + } else { 784 + err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space)); 785 + if (err < 0) { 786 + printk(KERN_ERR "FAILED: lba_fixup_bus() request for " 787 "lmmio_space [%lx/%lx]\n", 788 ldev->hba.lmmio_space.start, 789 ldev->hba.lmmio_space.end); 790 + } else 791 + bus->resource[i++] = &(ldev->hba.lmmio_space); 792 } 793 794 #ifdef CONFIG_64BIT ··· 791 lba_dump_res(&iomem_resource, 2); 792 BUG(); 793 } 794 + bus->resource[i++] = &(ldev->hba.gmmio_space); 795 } 796 #endif 797 798 } 799 800 list_for_each(ln, &bus->devices) {
+9 -9
drivers/parisc/led.c
··· 3 * 4 * (c) Copyright 2000 Red Hat Software 5 * (c) Copyright 2000 Helge Deller <hdeller@redhat.com> 6 - * (c) Copyright 2001-2004 Helge Deller <deller@gmx.de> 7 * (c) Copyright 2001 Randolph Chung <tausq@debian.org> 8 * 9 * This program is free software; you can redistribute it and/or modify ··· 56 relatively large amount of CPU time, some of the calculations can be 57 turned off with the following variables (controlled via procfs) */ 58 59 - static int led_type = -1; 60 static unsigned char lastleds; /* LED state from most recent update */ 61 - static unsigned int led_heartbeat = 1; 62 - static unsigned int led_diskio = 1; 63 - static unsigned int led_lanrxtx = 1; 64 - static char lcd_text[32]; 65 - static char lcd_text_default[32]; 66 67 68 static struct workqueue_struct *led_wq; ··· 108 /* lcd_info is pre-initialized to the values needed to program KittyHawk LCD's 109 * HP seems to have used Sharp/Hitachi HD44780 LCDs most of the time. */ 110 static struct pdc_chassis_lcd_info_ret_block 111 - lcd_info __attribute__((aligned(8))) = 112 { 113 .model = DISPLAY_MODEL_LCD, 114 .lcd_width = 16, ··· 144 device_initcall(start_task); 145 146 /* ptr to LCD/LED-specific function */ 147 - static void (*led_func_ptr) (unsigned char); 148 149 #ifdef CONFIG_PROC_FS 150 static int led_proc_read(char *page, char **start, off_t off, int count,
··· 3 * 4 * (c) Copyright 2000 Red Hat Software 5 * (c) Copyright 2000 Helge Deller <hdeller@redhat.com> 6 + * (c) Copyright 2001-2005 Helge Deller <deller@gmx.de> 7 * (c) Copyright 2001 Randolph Chung <tausq@debian.org> 8 * 9 * This program is free software; you can redistribute it and/or modify ··· 56 relatively large amount of CPU time, some of the calculations can be 57 turned off with the following variables (controlled via procfs) */ 58 59 + static int led_type __read_mostly = -1; 60 static unsigned char lastleds; /* LED state from most recent update */ 61 + static unsigned int led_heartbeat __read_mostly = 1; 62 + static unsigned int led_diskio __read_mostly = 1; 63 + static unsigned int led_lanrxtx __read_mostly = 1; 64 + static char lcd_text[32] __read_mostly; 65 + static char lcd_text_default[32] __read_mostly; 66 67 68 static struct workqueue_struct *led_wq; ··· 108 /* lcd_info is pre-initialized to the values needed to program KittyHawk LCD's 109 * HP seems to have used Sharp/Hitachi HD44780 LCDs most of the time. */ 110 static struct pdc_chassis_lcd_info_ret_block 111 + lcd_info __attribute__((aligned(8))) __read_mostly = 112 { 113 .model = DISPLAY_MODEL_LCD, 114 .lcd_width = 16, ··· 144 device_initcall(start_task); 145 146 /* ptr to LCD/LED-specific function */ 147 + static void (*led_func_ptr) (unsigned char) __read_mostly; 148 149 #ifdef CONFIG_PROC_FS 150 static int led_proc_read(char *page, char **start, off_t off, int count,
+32 -12
drivers/parisc/pdc_stable.c
··· 56 #include <asm/uaccess.h> 57 #include <asm/hardware.h> 58 59 - #define PDCS_VERSION "0.09" 60 61 #define PDCS_ADDR_PPRI 0x00 62 #define PDCS_ADDR_OSID 0x40 ··· 70 MODULE_LICENSE("GPL"); 71 MODULE_VERSION(PDCS_VERSION); 72 73 - static unsigned long pdcs_size = 0; 74 75 /* This struct defines what we need to deal with a parisc pdc path entry */ 76 struct pdcspath_entry { ··· 194 return -EIO; 195 } 196 197 - entry->ready = 1; 198 199 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); 200 ··· 654 { 655 unsigned short i; 656 struct pdcspath_entry *entry; 657 658 for (i = 0; (entry = pdcspath_entries[i]); i++) { 659 if (pdcspath_fetch(entry) < 0) 660 continue; 661 662 - kobject_set_name(&entry->kobj, "%s", entry->name); 663 kobj_set_kset_s(entry, paths_subsys); 664 - kobject_register(&entry->kobj); 665 - 666 if (!entry->dev) 667 continue; 668 ··· 682 /** 683 * pdcs_unregister_pathentries - Routine called when unregistering the module. 684 */ 685 - static inline void __exit 686 pdcs_unregister_pathentries(void) 687 { 688 unsigned short i; 689 struct pdcspath_entry *entry; 690 691 for (i = 0; (entry = pdcspath_entries[i]); i++) 692 - if (entry->ready) 693 kobject_unregister(&entry->kobj); 694 } 695 ··· 711 712 /* For now we'll register the pdc subsys within this driver */ 713 if ((rc = firmware_register(&pdc_subsys))) 714 - return rc; 715 716 /* Don't forget the info entry */ 717 for (i = 0; (attr = pdcs_subsys_attrs[i]) && !error; i++) ··· 720 721 /* register the paths subsys as a subsystem of pdc subsys */ 722 kset_set_kset_s(&paths_subsys, pdc_subsys); 723 - subsystem_register(&paths_subsys); 724 725 /* now we create all "files" for the paths subsys */ 726 - pdcs_register_pathentries(); 727 728 - return 0; 729 } 730 731 static void __exit
··· 56 #include <asm/uaccess.h> 57 #include <asm/hardware.h> 58 59 + #define PDCS_VERSION "0.10" 60 61 #define PDCS_ADDR_PPRI 0x00 62 #define PDCS_ADDR_OSID 0x40 ··· 70 MODULE_LICENSE("GPL"); 71 MODULE_VERSION(PDCS_VERSION); 72 73 + static unsigned long pdcs_size __read_mostly; 74 75 /* This struct defines what we need to deal with a parisc pdc path entry */ 76 struct pdcspath_entry { ··· 194 return -EIO; 195 } 196 197 + /* kobject is already registered */ 198 + entry->ready = 2; 199 200 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev); 201 ··· 653 { 654 unsigned short i; 655 struct pdcspath_entry *entry; 656 + int err; 657 658 for (i = 0; (entry = pdcspath_entries[i]); i++) { 659 if (pdcspath_fetch(entry) < 0) 660 continue; 661 662 + if ((err = kobject_set_name(&entry->kobj, "%s", entry->name))) 663 + return err; 664 kobj_set_kset_s(entry, paths_subsys); 665 + if ((err = kobject_register(&entry->kobj))) 666 + return err; 667 + 668 + /* kobject is now registered */ 669 + entry->ready = 2; 670 + 671 if (!entry->dev) 672 continue; 673 ··· 675 /** 676 * pdcs_unregister_pathentries - Routine called when unregistering the module. 677 */ 678 + static inline void 679 pdcs_unregister_pathentries(void) 680 { 681 unsigned short i; 682 struct pdcspath_entry *entry; 683 684 for (i = 0; (entry = pdcspath_entries[i]); i++) 685 + if (entry->ready >= 2) 686 kobject_unregister(&entry->kobj); 687 } 688 ··· 704 705 /* For now we'll register the pdc subsys within this driver */ 706 if ((rc = firmware_register(&pdc_subsys))) 707 + goto fail_firmreg; 708 709 /* Don't forget the info entry */ 710 for (i = 0; (attr = pdcs_subsys_attrs[i]) && !error; i++) ··· 713 714 /* register the paths subsys as a subsystem of pdc subsys */ 715 kset_set_kset_s(&paths_subsys, pdc_subsys); 716 + if ((rc= subsystem_register(&paths_subsys))) 717 + goto fail_subsysreg; 718 719 /* now we create all "files" for the paths subsys */ 720 + if ((rc = pdcs_register_pathentries())) 721 + goto fail_pdcsreg; 722 + 723 + return rc; 724 725 + fail_pdcsreg: 726 + pdcs_unregister_pathentries(); 727 + subsystem_unregister(&paths_subsys); 728 + 729 + fail_subsysreg: 730 + firmware_unregister(&pdc_subsys); 731 + 732 + fail_firmreg: 733 + printk(KERN_INFO "PDC Stable Storage bailing out\n"); 734 + return rc; 735 } 736 737 static void __exit
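Reviewer note: the rewritten pdcs_init() above now follows the common goto-unwind error-handling idiom: each registration step gets a matching failure label, so a late failure tears down only the steps that already succeeded, in reverse order. A generic sketch of the shape; step_*/undo_* are placeholders, not kernel functions:

    static int step_a(void), step_b(void), step_c(void);
    static void undo_a(void), undo_b(void);

    static int init_sketch(void)
    {
            int rc;

            if ((rc = step_a()))
                    goto fail_a;
            if ((rc = step_b()))
                    goto fail_b;
            if ((rc = step_c()))
                    goto fail_c;
            return 0;

    fail_c:
            undo_b();     /* undo step_b */
    fail_b:
            undo_a();     /* undo step_a */
    fail_a:
            return rc;
    }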
+6 -6
drivers/parisc/power.c
··· 2 * linux/arch/parisc/kernel/power.c 3 * HP PARISC soft power switch support driver 4 * 5 - * Copyright (c) 2001-2002 Helge Deller <deller@gmx.de> 6 * All rights reserved. 7 * 8 * ··· 102 103 static void poweroff(void) 104 { 105 - static int powering_off; 106 107 if (powering_off) 108 return; ··· 113 114 115 /* local time-counter for shutdown */ 116 - static int shutdown_timer; 117 118 /* check, give feedback and start shutdown after one second */ 119 static void process_shutdown(void) ··· 139 DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0); 140 141 /* soft power switch enabled/disabled */ 142 - int pwrsw_enabled = 1; 143 144 /* 145 * On gecko style machines (e.g. 712/xx and 715/xx) ··· 149 */ 150 static void gecko_tasklet_func(unsigned long unused) 151 { 152 - if (!pwrsw_enabled) 153 return; 154 155 if (__getDIAG(25) & 0x80000000) { ··· 173 { 174 unsigned long current_status; 175 176 - if (!pwrsw_enabled) 177 return; 178 179 current_status = gsc_readl(soft_power_reg);
··· 2 * linux/arch/parisc/kernel/power.c 3 * HP PARISC soft power switch support driver 4 * 5 + * Copyright (c) 2001-2005 Helge Deller <deller@gmx.de> 6 * All rights reserved. 7 * 8 * ··· 102 103 static void poweroff(void) 104 { 105 + static int powering_off __read_mostly; 106 107 if (powering_off) 108 return; ··· 113 114 115 /* local time-counter for shutdown */ 116 + static int shutdown_timer __read_mostly; 117 118 /* check, give feedback and start shutdown after one second */ 119 static void process_shutdown(void) ··· 139 DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0); 140 141 /* soft power switch enabled/disabled */ 142 + int pwrsw_enabled __read_mostly = 1; 143 144 /* 145 * On gecko style machines (e.g. 712/xx and 715/xx) ··· 149 */ 150 static void gecko_tasklet_func(unsigned long unused) 151 { 152 + if (unlikely(!pwrsw_enabled)) 153 return; 154 155 if (__getDIAG(25) & 0x80000000) { ··· 173 { 174 unsigned long current_status; 175 176 + if (unlikely(!pwrsw_enabled)) 177 return; 178 179 current_status = gsc_readl(soft_power_reg);
+1
drivers/parport/Kconfig
··· 121 tristate 122 default GSC 123 depends on PARPORT 124 125 config PARPORT_SUNBPP 126 tristate "Sparc hardware (EXPERIMENTAL)"
··· 121 tristate 122 default GSC 123 depends on PARPORT 124 + select PARPORT_NOT_PC 125 126 config PARPORT_SUNBPP 127 tristate "Sparc hardware (EXPERIMENTAL)"
+28 -37
drivers/video/stifb.c
··· 3 * Low level Frame buffer driver for HP workstations with 4 * STI (standard text interface) video firmware. 5 * 6 - * Copyright (C) 2001-2004 Helge Deller <deller@gmx.de> 7 * Portions Copyright (C) 2001 Thomas Bogendoerfer <tsbogend@alpha.franken.de> 8 * 9 * Based on: ··· 73 #include "sticore.h" 74 75 /* REGION_BASE(fb_info, index) returns the virtual address for region <index> */ 76 - #ifdef __LP64__ 77 - #define REGION_BASE(fb_info, index) \ 78 - (fb_info->sti->glob_cfg->region_ptrs[index] | 0xffffffff00000000) 79 - #else 80 - #define REGION_BASE(fb_info, index) \ 81 - fb_info->sti->glob_cfg->region_ptrs[index] 82 - #endif 83 84 #define NGLEDEVDEPROM_CRT_REGION 1 85 86 typedef struct { 87 __s32 video_config_reg; ··· 109 ngle_rom_t ngle_rom; 110 struct sti_struct *sti; 111 int deviceSpecificConfig; 112 - u32 pseudo_palette[256]; 113 }; 114 115 static int __initdata stifb_bpp_pref[MAX_STI_ROMS]; ··· 349 #define IS_888_DEVICE(fb) \ 350 (!(IS_24_DEVICE(fb))) 351 352 - #define GET_FIFO_SLOTS(fb, cnt, numslots) \ 353 - { while (cnt < numslots) \ 354 cnt = READ_WORD(fb, REG_34); \ 355 - cnt -= numslots; \ 356 } 357 358 #define IndexedDcd 0 /* Pixel data is indexed (pseudo) color */ ··· 992 struct stifb_info *fb = (struct stifb_info *) info; 993 u32 color; 994 995 - if (regno >= 256) /* no. of hw registers */ 996 return 1; 997 998 red >>= 8; ··· 1002 DEBUG_OFF(); 1003 1004 START_IMAGE_COLORMAP_ACCESS(fb); 1005 - 1006 - if (fb->info.var.grayscale) { 1007 /* gray = 0.30*R + 0.59*G + 0.11*B */ 1008 color = ((red * 77) + 1009 (green * 151) + ··· 1014 (blue)); 1015 } 1016 1017 - if (info->var.bits_per_pixel == 32) { 1018 - ((u32 *)(info->pseudo_palette))[regno] = 1019 - (red << info->var.red.offset) | 1020 - (green << info->var.green.offset) | 1021 - (blue << info->var.blue.offset); 1022 - } else { 1023 - ((u32 *)(info->pseudo_palette))[regno] = regno; 1024 } 1025 1026 WRITE_IMAGE_COLOR(fb, regno, color); 1027 - 1028 if (fb->id == S9000_ID_HCRX) { 1029 NgleLutBltCtl lutBltCtl; 1030 ··· 1063 case S9000_ID_HCRX: 1064 HYPER_ENABLE_DISABLE_DISPLAY(fb, enable); 1065 break; 1066 - case S9000_ID_A1659A:; /* fall through */ 1067 - case S9000_ID_TIMBER:; 1068 - case CRX24_OVERLAY_PLANES:; 1069 default: 1070 ENABLE_DISABLE_DISPLAY(fb, enable); 1071 break; ··· 1247 memset(&fb->ngle_rom, 0, sizeof(fb->ngle_rom)); 1248 if ((fb->sti->regions_phys[0] & 0xfc000000) == 1249 (fb->sti->regions_phys[2] & 0xfc000000)) 1250 - sti_rom_address = fb->sti->regions_phys[0]; 1251 else 1252 - sti_rom_address = fb->sti->regions_phys[1]; 1253 - #ifdef __LP64__ 1254 - sti_rom_address |= 0xffffffff00000000; 1255 - #endif 1256 fb->deviceSpecificConfig = gsc_readl(sti_rom_address); 1257 if (IS_24_DEVICE(fb)) { 1258 if (bpp_pref == 8 || bpp_pref == 32) ··· 1310 break; 1311 case 32: 1312 fix->type = FB_TYPE_PACKED_PIXELS; 1313 - fix->visual = FB_VISUAL_TRUECOLOR; 1314 var->red.length = var->green.length = var->blue.length = var->transp.length = 8; 1315 var->blue.offset = 0; 1316 var->green.offset = 8; ··· 1332 info->pseudo_palette = &fb->pseudo_palette; 1333 1334 /* This has to been done !!! 
*/ 1335 - fb_alloc_cmap(&info->cmap, 256, 0); 1336 stifb_init_display(fb); 1337 1338 if (!request_mem_region(fix->smem_start, fix->smem_len, "stifb fb")) { ··· 1483 MODULE_AUTHOR("Helge Deller <deller@gmx.de>, Thomas Bogendoerfer <tsbogend@alpha.franken.de>"); 1484 MODULE_DESCRIPTION("Framebuffer driver for HP's NGLE series graphics cards in HP PARISC machines"); 1485 MODULE_LICENSE("GPL v2"); 1486 - 1487 - MODULE_PARM(bpp, "i"); 1488 - MODULE_PARM_DESC(mem, "Bits per pixel (default: 8)"); 1489 -
··· 3 * Low level Frame buffer driver for HP workstations with 4 * STI (standard text interface) video firmware. 5 * 6 + * Copyright (C) 2001-2005 Helge Deller <deller@gmx.de> 7 * Portions Copyright (C) 2001 Thomas Bogendoerfer <tsbogend@alpha.franken.de> 8 * 9 * Based on: ··· 73 #include "sticore.h" 74 75 /* REGION_BASE(fb_info, index) returns the virtual address for region <index> */ 76 + #define REGION_BASE(fb_info, index) \ 77 + F_EXTEND(fb_info->sti->glob_cfg->region_ptrs[index]) 78 79 #define NGLEDEVDEPROM_CRT_REGION 1 80 + 81 + #define NR_PALETTE 256 82 83 typedef struct { 84 __s32 video_config_reg; ··· 112 ngle_rom_t ngle_rom; 113 struct sti_struct *sti; 114 int deviceSpecificConfig; 115 + u32 pseudo_palette[16]; 116 }; 117 118 static int __initdata stifb_bpp_pref[MAX_STI_ROMS]; ··· 352 #define IS_888_DEVICE(fb) \ 353 (!(IS_24_DEVICE(fb))) 354 355 + #define GET_FIFO_SLOTS(fb, cnt, numslots) \ 356 + { while (cnt < numslots) \ 357 cnt = READ_WORD(fb, REG_34); \ 358 + cnt -= numslots; \ 359 } 360 361 #define IndexedDcd 0 /* Pixel data is indexed (pseudo) color */ ··· 995 struct stifb_info *fb = (struct stifb_info *) info; 996 u32 color; 997 998 + if (regno >= NR_PALETTE) 999 return 1; 1000 1001 red >>= 8; ··· 1005 DEBUG_OFF(); 1006 1007 START_IMAGE_COLORMAP_ACCESS(fb); 1008 + 1009 + if (unlikely(fb->info.var.grayscale)) { 1010 /* gray = 0.30*R + 0.59*G + 0.11*B */ 1011 color = ((red * 77) + 1012 (green * 151) + ··· 1017 (blue)); 1018 } 1019 1020 + if (fb->info.fix.visual == FB_VISUAL_DIRECTCOLOR) { 1021 + struct fb_var_screeninfo *var = &fb->info.var; 1022 + if (regno < 16) 1023 + ((u32 *)fb->info.pseudo_palette)[regno] = 1024 + regno << var->red.offset | 1025 + regno << var->green.offset | 1026 + regno << var->blue.offset; 1027 } 1028 1029 WRITE_IMAGE_COLOR(fb, regno, color); 1030 + 1031 if (fb->id == S9000_ID_HCRX) { 1032 NgleLutBltCtl lutBltCtl; 1033 ··· 1066 case S9000_ID_HCRX: 1067 HYPER_ENABLE_DISABLE_DISPLAY(fb, enable); 1068 break; 1069 + case S9000_ID_A1659A: /* fall through */ 1070 + case S9000_ID_TIMBER: 1071 + case CRX24_OVERLAY_PLANES: 1072 default: 1073 ENABLE_DISABLE_DISPLAY(fb, enable); 1074 break; ··· 1250 memset(&fb->ngle_rom, 0, sizeof(fb->ngle_rom)); 1251 if ((fb->sti->regions_phys[0] & 0xfc000000) == 1252 (fb->sti->regions_phys[2] & 0xfc000000)) 1253 + sti_rom_address = F_EXTEND(fb->sti->regions_phys[0]); 1254 else 1255 + sti_rom_address = F_EXTEND(fb->sti->regions_phys[1]); 1256 + 1257 fb->deviceSpecificConfig = gsc_readl(sti_rom_address); 1258 if (IS_24_DEVICE(fb)) { 1259 if (bpp_pref == 8 || bpp_pref == 32) ··· 1315 break; 1316 case 32: 1317 fix->type = FB_TYPE_PACKED_PIXELS; 1318 + fix->visual = FB_VISUAL_DIRECTCOLOR; 1319 var->red.length = var->green.length = var->blue.length = var->transp.length = 8; 1320 var->blue.offset = 0; 1321 var->green.offset = 8; ··· 1337 info->pseudo_palette = &fb->pseudo_palette; 1338 1339 /* This has to been done !!! */ 1340 + fb_alloc_cmap(&info->cmap, NR_PALETTE, 0); 1341 stifb_init_display(fb); 1342 1343 if (!request_mem_region(fix->smem_start, fix->smem_len, "stifb fb")) { ··· 1488 MODULE_AUTHOR("Helge Deller <deller@gmx.de>, Thomas Bogendoerfer <tsbogend@alpha.franken.de>"); 1489 MODULE_DESCRIPTION("Framebuffer driver for HP's NGLE series graphics cards in HP PARISC machines"); 1490 MODULE_LICENSE("GPL v2");
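
The stifb.c hunk replaces the open-coded __LP64__ sign-extension of STI region pointers with F_EXTEND(), and switches the 32 bpp case to FB_VISUAL_DIRECTCOLOR with the 16-entry pseudo palette the framebuffer console conventionally uses for direct/truecolor visuals. A sketch of what the removed #ifdef block did, using a hypothetical macro name; the real F_EXTEND() comes from the parisc headers and is assumed to behave the same way:

        /* Equivalent of the removed #ifdef __LP64__ code path: widen a
         * 32-bit firmware address into the kernel's 64-bit I/O view by
         * setting the upper 32 bits (no-op on 32-bit kernels). */
        #ifdef CONFIG_64BIT
        #define F_EXTEND_SKETCH(a)  ((unsigned long)(a) | 0xffffffff00000000UL)
        #else
        #define F_EXTEND_SKETCH(a)  ((unsigned long)(a))
        #endif

        /* usage mirroring the hunk above: */
        /* sti_rom_address = F_EXTEND_SKETCH(fb->sti->regions_phys[0]); */
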
+1 -1
fs/afs/cmservice.c
··· 118 _SRXAFSCM_xxxx_t func; 119 int die; 120 121 - printk("kAFS: Started kafscmd %d\n", current->pid); 122 123 daemonize("kafscmd"); 124
··· 118 _SRXAFSCM_xxxx_t func; 119 int die; 120 121 + printk(KERN_INFO "kAFS: Started kafscmd %d\n", current->pid); 122 123 daemonize("kafscmd"); 124
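
The change above (like the similar ones in fs/binfmt_elf.c and mm/swapfile.c later in this merge) simply adds an explicit log level to printk(). A minimal illustration of the convention, using the standard KERN_* prefixes from <linux/kernel.h>; the messages themselves are made up:

        #include <linux/kernel.h>

        static void example_log_levels(void)     /* illustrative only */
        {
                printk(KERN_INFO "kAFS: started\n");        /* informational */
                printk(KERN_WARNING "kAFS: odd state\n");   /* warning */
                printk(KERN_ERR "kAFS: giving up\n");       /* error */
        }
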
-1
fs/attr.c
··· 14 #include <linux/fcntl.h> 15 #include <linux/quotaops.h> 16 #include <linux/security.h> 17 - #include <linux/time.h> 18 19 /* Taken over from the old code... */ 20
··· 14 #include <linux/fcntl.h> 15 #include <linux/quotaops.h> 16 #include <linux/security.h> 17 18 /* Taken over from the old code... */ 19
+4 -4
fs/binfmt_elf.c
··· 1634 ELF_CORE_WRITE_EXTRA_DATA; 1635 #endif 1636 1637 - if ((off_t) file->f_pos != offset) { 1638 /* Sanity check */ 1639 - printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n", 1640 - (off_t) file->f_pos, offset); 1641 } 1642 1643 end_coredump: 1644 set_fs(fs); 1645 1646 cleanup: 1647 - while(!list_empty(&thread_list)) { 1648 struct list_head *tmp = thread_list.next; 1649 list_del(tmp); 1650 kfree(list_entry(tmp, struct elf_thread_status, list));
··· 1634 ELF_CORE_WRITE_EXTRA_DATA; 1635 #endif 1636 1637 + if ((off_t)file->f_pos != offset) { 1638 /* Sanity check */ 1639 + printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n", 1640 + (off_t)file->f_pos, offset); 1641 } 1642 1643 end_coredump: 1644 set_fs(fs); 1645 1646 cleanup: 1647 + while (!list_empty(&thread_list)) { 1648 struct list_head *tmp = thread_list.next; 1649 list_del(tmp); 1650 kfree(list_entry(tmp, struct elf_thread_status, list));
+1 -1
fs/ext2/dir.c
··· 592 goto fail; 593 } 594 kaddr = kmap_atomic(page, KM_USER0); 595 - memset(kaddr, 0, chunk_size); 596 de = (struct ext2_dir_entry_2 *)kaddr; 597 de->name_len = 1; 598 de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
··· 592 goto fail; 593 } 594 kaddr = kmap_atomic(page, KM_USER0); 595 + memset(kaddr, 0, chunk_size); 596 de = (struct ext2_dir_entry_2 *)kaddr; 597 de->name_len = 1; 598 de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
-1
fs/proc/vmcore.c
··· 14 #include <linux/a.out.h> 15 #include <linux/elf.h> 16 #include <linux/elfcore.h> 17 - #include <linux/proc_fs.h> 18 #include <linux/highmem.h> 19 #include <linux/bootmem.h> 20 #include <linux/init.h>
··· 14 #include <linux/a.out.h> 15 #include <linux/elf.h> 16 #include <linux/elfcore.h> 17 #include <linux/highmem.h> 18 #include <linux/bootmem.h> 19 #include <linux/init.h>
-1
fs/xfs/xfs_iomap.c
··· 40 #include "xfs_ialloc.h" 41 #include "xfs_btree.h" 42 #include "xfs_bmap.h" 43 - #include "xfs_bit.h" 44 #include "xfs_rtalloc.h" 45 #include "xfs_error.h" 46 #include "xfs_itable.h"
··· 40 #include "xfs_ialloc.h" 41 #include "xfs_btree.h" 42 #include "xfs_bmap.h" 43 #include "xfs_rtalloc.h" 44 #include "xfs_error.h" 45 #include "xfs_itable.h"
+4 -4
include/asm-parisc/cache.h
··· 29 30 #define SMP_CACHE_BYTES L1_CACHE_BYTES 31 32 - extern void flush_data_cache_local(void); /* flushes local data-cache only */ 33 - extern void flush_instruction_cache_local(void); /* flushes local code-cache only */ 34 #ifdef CONFIG_SMP 35 extern void flush_data_cache(void); /* flushes data-cache only (all processors) */ 36 extern void flush_instruction_cache(void); /* flushes i-cache only (all processors) */ 37 #else 38 - #define flush_data_cache flush_data_cache_local 39 - #define flush_instruction_cache flush_instruction_cache_local 40 #endif 41 42 extern void parisc_cache_init(void); /* initializes cache-flushing */
··· 29 30 #define SMP_CACHE_BYTES L1_CACHE_BYTES 31 32 + extern void flush_data_cache_local(void *); /* flushes local data-cache only */ 33 + extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */ 34 #ifdef CONFIG_SMP 35 extern void flush_data_cache(void); /* flushes data-cache only (all processors) */ 36 extern void flush_instruction_cache(void); /* flushes i-cache only (all processors) */ 37 #else 38 + #define flush_data_cache() flush_data_cache_local(NULL) 39 + #define flush_instruction_cache() flush_instruction_cache_local(NULL) 40 #endif 41 42 extern void parisc_cache_init(void); /* initializes cache-flushing */
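
Giving the local flush routines a void * parameter makes them match the void (*)(void *) callback type used by the kernel's cross-CPU call helpers, so the SMP variants can simply broadcast them; the UP fallbacks become function-like macros that pass NULL. A sketch of that use, assuming the four-argument on_each_cpu() of this kernel generation:

        #include <linux/smp.h>          /* on_each_cpu() */
        #include <asm/cache.h>          /* flush_data_cache_local() */

        static void example_flush_all_data_caches(void)    /* illustrative only */
        {
                /* Run the local flush on every CPU; the otherwise unused
                 * argument is what the new void * parameter exists for. */
                on_each_cpu(flush_data_cache_local, NULL, 1, 1);
        }
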
+1 -1
include/asm-parisc/io.h
··· 41 #define __raw_check_addr(addr) \ 42 if (((unsigned long)addr >> NYBBLE_SHIFT) != 0xe) \ 43 __raw_bad_addr(addr); \ 44 - addr = (void *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT)); 45 #else 46 #define gsc_check_addr(addr) 47 #define __raw_check_addr(addr)
··· 41 #define __raw_check_addr(addr) \ 42 if (((unsigned long)addr >> NYBBLE_SHIFT) != 0xe) \ 43 __raw_bad_addr(addr); \ 44 + addr = (void __iomem *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT)); 45 #else 46 #define gsc_check_addr(addr) 47 #define __raw_check_addr(addr)
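
The io.h fix restores the __iomem qualifier that the cast was dropping. __iomem is a sparse-only annotation (empty in normal builds) that marks pointers into I/O space; its definition is assumed to follow the usual shape found in <linux/compiler.h> of this era:

        /* Sketch of the annotation preserved by the cast above: */
        #ifdef __CHECKER__
        # define __iomem __attribute__((noderef, address_space(2)))
        #else
        # define __iomem
        #endif
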
+7
include/asm-parisc/page.h
··· 135 #define pfn_valid(pfn) ((pfn) < max_mapnr) 136 #endif /* CONFIG_DISCONTIGMEM */ 137 138 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 139 140 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
··· 135 #define pfn_valid(pfn) ((pfn) < max_mapnr) 136 #endif /* CONFIG_DISCONTIGMEM */ 137 138 + #ifdef CONFIG_HUGETLB_PAGE 139 + #define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ 140 + #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 141 + #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 142 + #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 143 + #endif 144 + 145 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 146 147 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
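
For reference, the worked numbers behind the new huge-page constants, assuming the usual 4 KB base page (PAGE_SHIFT == 12) on this configuration:

        /*
         *  HPAGE_SIZE         = 1UL << 22          = 4 MB
         *  HPAGE_MASK         = ~(HPAGE_SIZE - 1)  = clears the low 22 bits
         *  HUGETLB_PAGE_ORDER = 22 - 12            = 10, i.e. 1024 base
         *                                            pages per huge page
         */
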
+7 -1
include/asm-parisc/pci.h
··· 84 /* 85 ** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses. 86 ** See pcibios.c for more conversions used by Generic PCI code. 87 */ 88 #define PCI_BUS_ADDR(hba,a) (PCI_IS_LMMIO(hba,a) \ 89 ? ((a) - hba->lmmio_space_offset) /* mangle LMMIO */ \ 90 : (a)) /* GMMIO */ 91 - #define PCI_HOST_ADDR(hba,a) ((a) + hba->lmmio_space_offset) 92 93 #else /* !CONFIG_64BIT */ 94
··· 84 /* 85 ** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses. 86 ** See pcibios.c for more conversions used by Generic PCI code. 87 + ** 88 + ** Platform characteristics/firmware guarantee that 89 + ** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO 90 + ** (2) PA_VIEW == IO_VIEW for GMMIO 91 */ 92 #define PCI_BUS_ADDR(hba,a) (PCI_IS_LMMIO(hba,a) \ 93 ? ((a) - hba->lmmio_space_offset) /* mangle LMMIO */ \ 94 : (a)) /* GMMIO */ 95 + #define PCI_HOST_ADDR(hba,a) (((a) & PCI_F_EXTEND) == 0 \ 96 + ? (a) + hba->lmmio_space_offset \ 97 + : (a)) 98 99 #else /* !CONFIG_64BIT */ 100
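
The reworked PCI_HOST_ADDR() only applies lmmio_space_offset to 32-bit LMMIO/ELMMIO bus addresses and leaves GMMIO addresses, which are identical in both views, untouched. A function-style sketch of the decision the macro encodes, assuming PCI_F_EXTEND is the upper-32-bit mask used by the parisc PCI code:

        #include <asm/pci.h>            /* struct pci_hba_data, PCI_F_EXTEND */

        static unsigned long example_host_addr(struct pci_hba_data *hba,
                                               unsigned long a)
        {
                if ((a & PCI_F_EXTEND) == 0)    /* LMMIO / ELMMIO */
                        return a + hba->lmmio_space_offset;
                return a;                       /* GMMIO: PA_VIEW == IO_VIEW */
        }
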
+9 -9
include/asm-parisc/processor.h
··· 144 }) 145 146 #define INIT_THREAD { \ 147 - regs: { gr: { 0, }, \ 148 - fr: { 0, }, \ 149 - sr: { 0, }, \ 150 - iasq: { 0, }, \ 151 - iaoq: { 0, }, \ 152 - cr27: 0, \ 153 }, \ 154 - task_size: DEFAULT_TASK_SIZE, \ 155 - map_base: DEFAULT_MAP_BASE, \ 156 - flags: 0 \ 157 } 158 159 /*
··· 144 }) 145 146 #define INIT_THREAD { \ 147 + .regs = { .gr = { 0, }, \ 148 + .fr = { 0, }, \ 149 + .sr = { 0, }, \ 150 + .iasq = { 0, }, \ 151 + .iaoq = { 0, }, \ 152 + .cr27 = 0, \ 153 }, \ 154 + .task_size = DEFAULT_TASK_SIZE, \ 155 + .map_base = DEFAULT_MAP_BASE, \ 156 + .flags = 0 \ 157 } 158 159 /*
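
The INIT_THREAD rewrite replaces the old GCC-specific "field: value" initializer extension with standard C99 designated initializers. A self-contained illustration with a made-up struct (the old form still compiles with GCC but is deprecated):

        struct example_thread {         /* hypothetical, for illustration only */
                unsigned long task_size;
                unsigned long map_base;
                unsigned int flags;
        };

        /* Old GCC extension, as removed by the hunk:
         *      { task_size: 4096, map_base: 0, flags: 0 }
         * Standard C99 form, as introduced: */
        static struct example_thread example = {
                .task_size = 4096,
                .map_base  = 0,
                .flags     = 0,
        };
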
+1
include/asm-parisc/tlbflush.h
··· 22 #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock) 23 24 extern void flush_tlb_all(void); 25 26 /* 27 * flush_tlb_mm()
··· 22 #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock) 23 24 extern void flush_tlb_all(void); 25 + extern void flush_tlb_all_local(void *); 26 27 /* 28 * flush_tlb_mm()
-1
include/asm-powerpc/elf.h
··· 92 * as published by the Free Software Foundation; either version 93 * 2 of the License, or (at your option) any later version. 94 */ 95 - #include <asm/ptrace.h> 96 97 #define ELF_NGREG 48 /* includes nip, msr, lr, etc. */ 98 #define ELF_NFPREG 33 /* includes fpscr */
··· 92 * as published by the Free Software Foundation; either version 93 * 2 of the License, or (at your option) any later version. 94 */ 95 96 #define ELF_NGREG 48 /* includes nip, msr, lr, etc. */ 97 #define ELF_NFPREG 33 /* includes fpscr */
+1 -1
include/linux/cache.h
··· 13 #define SMP_CACHE_BYTES L1_CACHE_BYTES 14 #endif 15 16 - #if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) 17 #define __read_mostly __attribute__((__section__(".data.read_mostly"))) 18 #else 19 #define __read_mostly
··· 13 #define SMP_CACHE_BYTES L1_CACHE_BYTES 14 #endif 15 16 + #if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 17 #define __read_mostly __attribute__((__section__(".data.read_mostly"))) 18 #else 19 #define __read_mostly
+1 -1
include/linux/mm.h
··· 1027 { 1028 if (!PageHighMem(page) && !enable) 1029 mutex_debug_check_no_locks_freed(page_address(page), 1030 - page_address(page + numpages)); 1031 } 1032 #endif 1033
··· 1027 { 1028 if (!PageHighMem(page) && !enable) 1029 mutex_debug_check_no_locks_freed(page_address(page), 1030 + numpages * PAGE_SIZE); 1031 } 1032 #endif 1033
+1 -1
include/linux/mutex-debug.h
··· 18 extern void mutex_debug_show_all_locks(void); 19 extern void mutex_debug_show_held_locks(struct task_struct *filter); 20 extern void mutex_debug_check_no_locks_held(struct task_struct *task); 21 - extern void mutex_debug_check_no_locks_freed(const void *from, const void *to); 22 23 #endif
··· 18 extern void mutex_debug_show_all_locks(void); 19 extern void mutex_debug_show_held_locks(struct task_struct *filter); 20 extern void mutex_debug_check_no_locks_held(struct task_struct *task); 21 + extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len); 22 23 #endif
+2 -1
include/linux/mutex.h
··· 12 13 #include <linux/list.h> 14 #include <linux/spinlock_types.h> 15 16 #include <asm/atomic.h> 17 ··· 79 # define mutex_debug_show_all_locks() do { } while (0) 80 # define mutex_debug_show_held_locks(p) do { } while (0) 81 # define mutex_debug_check_no_locks_held(task) do { } while (0) 82 - # define mutex_debug_check_no_locks_freed(from, to) do { } while (0) 83 #endif 84 85 #define __MUTEX_INITIALIZER(lockname) \
··· 12 13 #include <linux/list.h> 14 #include <linux/spinlock_types.h> 15 + #include <linux/linkage.h> 16 17 #include <asm/atomic.h> 18 ··· 78 # define mutex_debug_show_all_locks() do { } while (0) 79 # define mutex_debug_show_held_locks(p) do { } while (0) 80 # define mutex_debug_check_no_locks_held(task) do { } while (0) 81 + # define mutex_debug_check_no_locks_freed(from, len) do { } while (0) 82 #endif 83 84 #define __MUTEX_INITIALIZER(lockname) \
+3 -3
kernel/mutex-debug.c
··· 333 * is destroyed or reinitialized - this code checks whether there is 334 * any held lock in the memory range of <from> to <to>: 335 */ 336 - void mutex_debug_check_no_locks_freed(const void *from, const void *to) 337 { 338 struct list_head *curr, *next; 339 unsigned long flags; 340 struct mutex *lock; 341 void *lock_addr; ··· 438 /* 439 * Make sure we are not reinitializing a held lock: 440 */ 441 - mutex_debug_check_no_locks_freed((void *)lock, (void *)(lock + 1)); 442 lock->owner = NULL; 443 INIT_LIST_HEAD(&lock->held_list); 444 lock->name = name; ··· 460 } 461 462 EXPORT_SYMBOL_GPL(mutex_destroy); 463 -
··· 333 * is destroyed or reinitialized - this code checks whether there is 334 * any held lock in the memory range of <from> to <to>: 335 */ 336 + void mutex_debug_check_no_locks_freed(const void *from, unsigned long len) 337 { 338 struct list_head *curr, *next; 339 + const void *to = from + len; 340 unsigned long flags; 341 struct mutex *lock; 342 void *lock_addr; ··· 437 /* 438 * Make sure we are not reinitializing a held lock: 439 */ 440 + mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 441 lock->owner = NULL; 442 INIT_LIST_HEAD(&lock->held_list); 443 lock->name = name; ··· 459 } 460 461 EXPORT_SYMBOL_GPL(mutex_destroy);
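
After this change mutex_debug_check_no_locks_freed() takes a start address and a length instead of a start and end pointer, which is what the updated callers in include/linux/mm.h, mm/page_alloc.c and mm/slab.c now pass. A sketch of the new calling convention with a made-up helper:

        #include <linux/mutex-debug.h>

        static void example_free_region(void *buf, unsigned long size)
        {
                /* was: mutex_debug_check_no_locks_freed(buf, buf + size); */
                mutex_debug_check_no_locks_freed(buf, size);
                /* ... actually release the memory here ... */
        }
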
+1 -4
kernel/mutex.c
··· 202 static fastcall noinline void 203 __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__) 204 { 205 - struct mutex *lock = container_of(lock_count, struct mutex, count); 206 207 DEBUG_WARN_ON(lock->owner != current_thread_info()); 208 ··· 313 } 314 315 EXPORT_SYMBOL(mutex_trylock); 316 - 317 - 318 -
··· 202 static fastcall noinline void 203 __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__) 204 { 205 + struct mutex *lock = container_of(lock_count, struct mutex, count); 206 207 DEBUG_WARN_ON(lock->owner != current_thread_info()); 208 ··· 313 } 314 315 EXPORT_SYMBOL(mutex_trylock);
+1 -1
mm/page_alloc.c
··· 417 arch_free_page(page, order); 418 if (!PageHighMem(page)) 419 mutex_debug_check_no_locks_freed(page_address(page), 420 - page_address(page+(1<<order))); 421 422 #ifndef CONFIG_MMU 423 for (i = 1 ; i < (1 << order) ; ++i)
··· 417 arch_free_page(page, order); 418 if (!PageHighMem(page)) 419 mutex_debug_check_no_locks_freed(page_address(page), 420 + PAGE_SIZE<<order); 421 422 #ifndef CONFIG_MMU 423 for (i = 1 ; i < (1 << order) ; ++i)
+1 -1
mm/slab.c
··· 3071 local_irq_save(flags); 3072 kfree_debugcheck(objp); 3073 c = page_get_cache(virt_to_page(objp)); 3074 - mutex_debug_check_no_locks_freed(objp, objp+obj_reallen(c)); 3075 __cache_free(c, (void *)objp); 3076 local_irq_restore(flags); 3077 }
··· 3071 local_irq_save(flags); 3072 kfree_debugcheck(objp); 3073 c = page_get_cache(virt_to_page(objp)); 3074 + mutex_debug_check_no_locks_freed(objp, obj_reallen(c)); 3075 __cache_free(c, (void *)objp); 3076 local_irq_restore(flags); 3077 }
+1 -1
mm/swapfile.c
··· 1442 else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10)) 1443 swap_header_version = 2; 1444 else { 1445 - printk("Unable to find swap-space signature\n"); 1446 error = -EINVAL; 1447 goto bad_swap; 1448 }
··· 1442 else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10)) 1443 swap_header_version = 2; 1444 else { 1445 + printk(KERN_ERR "Unable to find swap-space signature\n"); 1446 error = -EINVAL; 1447 goto bad_swap; 1448 }
+1
net/core/wireless.c
··· 78 #include <linux/seq_file.h> 79 #include <linux/init.h> /* for __init */ 80 #include <linux/if_arp.h> /* ARPHRD_ETHER */ 81 82 #include <linux/wireless.h> /* Pretty obvious */ 83 #include <net/iw_handler.h> /* New driver API */
··· 78 #include <linux/seq_file.h> 79 #include <linux/init.h> /* for __init */ 80 #include <linux/if_arp.h> /* ARPHRD_ETHER */ 81 + #include <linux/etherdevice.h> /* compare_ether_addr */ 82 83 #include <linux/wireless.h> /* Pretty obvious */ 84 #include <net/iw_handler.h> /* New driver API */
-2
net/decnet/netfilter/dn_rtmsg.c
··· 26 #include <net/dn.h> 27 #include <net/dn_route.h> 28 29 - #include <linux/netfilter_decnet.h> 30 - 31 static struct sock *dnrmg = NULL; 32 33
··· 26 #include <net/dn.h> 27 #include <net/dn_route.h> 28 29 static struct sock *dnrmg = NULL; 30 31
-1
net/ipv4/netfilter/ip_conntrack_proto_icmp.c
··· 16 #include <linux/skbuff.h> 17 #include <net/ip.h> 18 #include <net/checksum.h> 19 - #include <linux/netfilter.h> 20 #include <linux/netfilter_ipv4.h> 21 #include <linux/netfilter_ipv4/ip_conntrack.h> 22 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
··· 16 #include <linux/skbuff.h> 17 #include <net/ip.h> 18 #include <net/checksum.h> 19 #include <linux/netfilter_ipv4.h> 20 #include <linux/netfilter_ipv4/ip_conntrack.h> 21 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
-1
net/ipv4/netfilter/ip_conntrack_proto_tcp.c
··· 32 33 #include <net/tcp.h> 34 35 - #include <linux/netfilter.h> 36 #include <linux/netfilter_ipv4.h> 37 #include <linux/netfilter_ipv4/ip_conntrack.h> 38 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
··· 32 33 #include <net/tcp.h> 34 35 #include <linux/netfilter_ipv4.h> 36 #include <linux/netfilter_ipv4/ip_conntrack.h> 37 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-1
net/ipv4/netfilter/ip_conntrack_proto_udp.c
··· 15 #include <linux/udp.h> 16 #include <linux/seq_file.h> 17 #include <net/checksum.h> 18 - #include <linux/netfilter.h> 19 #include <linux/netfilter_ipv4.h> 20 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> 21
··· 15 #include <linux/udp.h> 16 #include <linux/seq_file.h> 17 #include <net/checksum.h> 18 #include <linux/netfilter_ipv4.h> 19 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> 20
+2 -1
net/ipv6/Makefile
··· 12 13 ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \ 14 xfrm6_output.o 15 ipv6-objs += $(ipv6-y) 16 17 obj-$(CONFIG_INET6_AH) += ah6.o 18 obj-$(CONFIG_INET6_ESP) += esp6.o 19 obj-$(CONFIG_INET6_IPCOMP) += ipcomp6.o 20 obj-$(CONFIG_INET6_TUNNEL) += xfrm6_tunnel.o 21 - obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/ 22 23 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 24
··· 12 13 ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \ 14 xfrm6_output.o 15 + ipv6-$(CONFIG_NETFILTER) += netfilter.o 16 ipv6-objs += $(ipv6-y) 17 18 obj-$(CONFIG_INET6_AH) += ah6.o 19 obj-$(CONFIG_INET6_ESP) += esp6.o 20 obj-$(CONFIG_INET6_IPCOMP) += ipcomp6.o 21 obj-$(CONFIG_INET6_TUNNEL) += xfrm6_tunnel.o 22 + obj-$(CONFIG_NETFILTER) += netfilter/ 23 24 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 25
+4 -1
net/ipv6/netfilter.c
··· 90 return nf_register_queue_rerouter(PF_INET6, &ip6_reroute); 91 } 92 93 - void __exit ipv6_netfilter_fini(void) 94 { 95 nf_unregister_queue_rerouter(PF_INET6); 96 }
··· 90 return nf_register_queue_rerouter(PF_INET6, &ip6_reroute); 91 } 92 93 + /* This can be called from inet6_init() on errors, so it cannot 94 + * be marked __exit. -DaveM 95 + */ 96 + void ipv6_netfilter_fini(void) 97 { 98 nf_unregister_queue_rerouter(PF_INET6); 99 }
+2 -2
sound/oss/harmony.c
··· 1236 } 1237 1238 /* Set the HPA of harmony */ 1239 - harmony.hpa = (struct harmony_hpa *)dev->hpa; 1240 harmony.dev = dev; 1241 1242 /* Grab the ID and revision from the device */ ··· 1250 1251 printk(KERN_INFO "Lasi Harmony Audio driver " HARMONY_VERSION ", " 1252 "h/w id %i, rev. %i at 0x%lx, IRQ %i\n", 1253 - id, rev, dev->hpa, harmony.dev->irq); 1254 1255 /* Make sure the control bit isn't set, although I don't think it 1256 ever is. */
··· 1236 } 1237 1238 /* Set the HPA of harmony */ 1239 + harmony.hpa = (struct harmony_hpa *)dev->hpa.start; 1240 harmony.dev = dev; 1241 1242 /* Grab the ID and revision from the device */ ··· 1250 1251 printk(KERN_INFO "Lasi Harmony Audio driver " HARMONY_VERSION ", " 1252 "h/w id %i, rev. %i at 0x%lx, IRQ %i\n", 1253 + id, rev, dev->hpa.start, harmony.dev->irq); 1254 1255 /* Make sure the control bit isn't set, although I don't think it 1256 ever is. */