Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86/debug' into x86/irq

We merge this branch because x86/debug touches code that we started
cleaning up in x86/irq. The two branches started out independent,
but as an unexpected amount of activity went into x86/irq, they became
dependent. Resolve that by this cross-merge.

+2524 -1736
+8 -3
Documentation/kernel-parameters.txt
··· 294 294 Possible values are: 295 295 isolate - enable device isolation (each device, as far 296 296 as possible, will get its own protection 297 - domain) 297 + domain) [default] 298 + share - put every device behind one IOMMU into the 299 + same protection domain 298 300 fullflush - enable flushing of IO/TLB entries when 299 301 they are unmapped. Otherwise they are 300 302 flushed before they will be reused, which ··· 1195 1193 it is equivalent to "nosmp", which also disables 1196 1194 the IO APIC. 1197 1195 1198 - max_addr=[KMG] [KNL,BOOT,ia64] All physical memory greater than or 1199 - equal to this physical address is ignored. 1196 + max_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory greater than 1197 + or equal to this physical address is ignored. 1200 1198 1201 1199 max_luns= [SCSI] Maximum number of LUNs to probe. 1202 1200 Should be between 1 and 2^32-1. ··· 1295 1293 problem by letting the user disable the workaround. 1296 1294 1297 1295 mga= [HW,DRM] 1296 + 1297 + min_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory below this 1298 + physical address is ignored. 1298 1299 1299 1300 mminit_loglevel= 1300 1301 [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
+1 -1
Documentation/networking/phy.txt
··· 96 96 static void adjust_link(struct net_device *dev); 97 97 98 98 Next, you need to know the device name of the PHY connected to this device. 99 - The name will look something like, "phy0:0", where the first number is the 99 + The name will look something like, "0:00", where the first number is the 100 100 bus id, and the second is the PHY's address on that bus. Typically, 101 101 the bus is responsible for making its ID unique. 102 102
+1 -3
MAINTAINERS
··· 1809 1809 1810 1810 FTRACE 1811 1811 P: Steven Rostedt 1812 - M: srostedt@redhat.com 1812 + M: rostedt@goodmis.org 1813 1813 S: Maintained 1814 1814 1815 1815 FUJITSU FR-V (FRV) PORT ··· 3928 3928 S: Maintained 3929 3929 3930 3930 SOFTWARE RAID (Multiple Disks) SUPPORT 3931 - P: Ingo Molnar 3932 - M: mingo@redhat.com 3933 3931 P: Neil Brown 3934 3932 M: neilb@suse.de 3935 3933 L: linux-raid@vger.kernel.org
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 28 4 - EXTRAVERSION = -rc5 4 + EXTRAVERSION = -rc6 5 5 NAME = Killer Bat of Doom 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/blackfin/include/asm/bfin-global.h
··· 101 101 extern unsigned long _ramstart, _ramend, _rambase; 102 102 extern unsigned long memory_start, memory_end, physical_mem_end; 103 103 extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], 104 - _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _ebss_b_l1[], 104 + _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], 105 105 _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], 106 106 _ebss_l2[], _l2_lma_start[]; 107 107
+5 -1
arch/blackfin/include/asm/dma-mapping.h
··· 15 15 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 16 16 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 17 17 18 - #define dma_mapping_error 18 + static inline 19 + int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 20 + { 21 + return 0; 22 + } 19 23 20 24 /* 21 25 * Map a single buffer of the indicated size for DMA in streaming mode.
+1 -1
arch/blackfin/kernel/bfin_gpio.c
··· 218 218 if (gpio == GPIO_PB15 || gpio == GPIO_PC14 || gpio == GPIO_PC15 219 219 || gpio == GPIO_PH14 || gpio == GPIO_PH15 220 220 || gpio == GPIO_PJ14 || gpio == GPIO_PJ15 221 - || gpio > MAX_BLACKFIN_GPIOS) 221 + || gpio >= MAX_BLACKFIN_GPIOS) 222 222 return -EINVAL; 223 223 return 0; 224 224 }
+5 -4
arch/blackfin/kernel/cplb-nompu/cplbinit.c
··· 188 188 189 189 static u16 __init lock_kernel_check(u32 start, u32 end) 190 190 { 191 - if ((end <= (u32) _end && end >= (u32)_stext) || 192 - (start <= (u32) _end && start >= (u32)_stext)) 193 - return IN_KERNEL; 194 - return 0; 191 + if (start >= (u32)_end || end <= (u32)_stext) 192 + return 0; 193 + 194 + /* This cplb block overlapped with kernel area. */ 195 + return IN_KERNEL; 195 196 } 196 197 197 198 static unsigned short __init
+6 -1
arch/blackfin/kernel/process.c
··· 351 351 return 1; 352 352 #endif 353 353 #if L1_DATA_B_LENGTH != 0 354 - if (addr >= L1_DATA_B_START 354 + if (addr >= L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1) 355 355 && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH) 356 + return 1; 357 + #endif 358 + #if L2_LENGTH != 0 359 + if (addr >= L2_START + (_ebss_l2 - _stext_l2) 360 + && addr + size <= L2_START + L2_LENGTH) 356 361 return 1; 357 362 #endif 358 363 return 0;
+6 -6
arch/blackfin/kernel/setup.c
··· 119 119 /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */ 120 120 dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length); 121 121 122 - l1_data_a_length = _ebss_l1 - _sdata_l1; 122 + l1_data_a_length = _sbss_l1 - _sdata_l1; 123 123 if (l1_data_a_length > L1_DATA_A_LENGTH) 124 124 panic("L1 Data SRAM Bank A Overflow\n"); 125 125 126 - /* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */ 126 + /* Copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */ 127 127 dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length); 128 128 129 - l1_data_b_length = _ebss_b_l1 - _sdata_b_l1; 129 + l1_data_b_length = _sbss_b_l1 - _sdata_b_l1; 130 130 if (l1_data_b_length > L1_DATA_B_LENGTH) 131 131 panic("L1 Data SRAM Bank B Overflow\n"); 132 132 133 - /* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */ 133 + /* Copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */ 134 134 dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + 135 135 l1_data_a_length, l1_data_b_length); 136 136 137 137 if (L2_LENGTH != 0) { 138 - l2_length = _ebss_l2 - _stext_l2; 138 + l2_length = _sbss_l2 - _stext_l2; 139 139 if (l2_length > L2_LENGTH) 140 140 panic("L2 SRAM Overflow\n"); 141 141 ··· 827 827 printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", 828 828 bfin_compiled_revid(), bfin_revid()); 829 829 } 830 - if (bfin_revid() <= CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) 830 + if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) 831 831 printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n", 832 832 CPU, bfin_revid()); 833 833 }
+7 -4
arch/blackfin/kernel/traps.c
··· 59 59 #endif 60 60 61 61 62 - #ifdef CONFIG_VERBOSE_DEBUG 62 + #ifdef CONFIG_DEBUG_VERBOSE 63 63 #define verbose_printk(fmt, arg...) \ 64 64 printk(fmt, ##arg) 65 65 #else ··· 147 147 char *name = p->comm; 148 148 struct file *file = vma->vm_file; 149 149 150 - if (file) 151 - name = d_path(&file->f_path, _tmpbuf, 150 + if (file) { 151 + char *d_name = d_path(&file->f_path, _tmpbuf, 152 152 sizeof(_tmpbuf)); 153 + if (!IS_ERR(d_name)) 154 + name = d_name; 155 + } 153 156 154 157 /* FLAT does not have its text aligned to the start of 155 158 * the map while FDPIC ELF does ... ··· 574 571 #endif 575 572 panic("Kernel exception"); 576 573 } else { 577 - #ifdef CONFIG_VERBOSE_DEBUG 574 + #ifdef CONFIG_DEBUG_VERBOSE 578 575 unsigned long *stack; 579 576 /* Dump the user space stack */ 580 577 stack = (unsigned long *)rdusp();
+6 -2
arch/blackfin/mach-common/cache.S
··· 25 25 */ 26 26 .macro do_flush flushins:req optflushins optnopins label 27 27 28 + R2 = -L1_CACHE_BYTES; 29 + 30 + /* start = (start & -L1_CACHE_BYTES) */ 31 + R0 = R0 & R2; 32 + 28 33 /* end = ((end - 1) & -L1_CACHE_BYTES) + L1_CACHE_BYTES; */ 29 34 R1 += -1; 30 - R2 = -L1_CACHE_BYTES; 31 35 R1 = R1 & R2; 32 36 R1 += L1_CACHE_BYTES; 33 37 ··· 67 63 68 64 /* Flush all cache lines assocoiated with this area of memory. */ 69 65 ENTRY(_blackfin_icache_dcache_flush_range) 70 - do_flush IFLUSH, FLUSH 66 + do_flush FLUSH, IFLUSH 71 67 ENDPROC(_blackfin_icache_dcache_flush_range) 72 68 73 69 /* Throw away all D-cached data in specified region without any obligation to
+7 -7
arch/blackfin/mach-common/cpufreq.c
··· 72 72 73 73 /**************************************************************************/ 74 74 75 - static unsigned int bfin_getfreq(unsigned int cpu) 75 + static unsigned int bfin_getfreq_khz(unsigned int cpu) 76 76 { 77 77 /* The driver only support single cpu */ 78 78 if (cpu != 0) 79 79 return -1; 80 80 81 - return get_cclk(); 81 + return get_cclk() / 1000; 82 82 } 83 83 84 84 ··· 96 96 97 97 cclk_hz = bfin_freq_table[index].frequency; 98 98 99 - freqs.old = bfin_getfreq(0); 99 + freqs.old = bfin_getfreq_khz(0); 100 100 freqs.new = cclk_hz; 101 101 freqs.cpu = 0; 102 102 ··· 137 137 if (policy->cpu != 0) 138 138 return -EINVAL; 139 139 140 - cclk = get_cclk(); 141 - sclk = get_sclk(); 140 + cclk = get_cclk() / 1000; 141 + sclk = get_sclk() / 1000; 142 142 143 143 #if ANOMALY_05000273 || (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE)) 144 144 min_cclk = sclk * 2; ··· 152 152 dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ 153 153 dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1; 154 154 155 - pr_debug("cpufreq: freq:%d csel:%d tscale:%d\n", 155 + pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", 156 156 bfin_freq_table[index].frequency, 157 157 dpm_state_table[index].csel, 158 158 dpm_state_table[index].tscale); ··· 173 173 static struct cpufreq_driver bfin_driver = { 174 174 .verify = bfin_verify_speed, 175 175 .target = bfin_target, 176 - .get = bfin_getfreq, 176 + .get = bfin_getfreq_khz, 177 177 .init = __bfin_cpu_init, 178 178 .name = "bfin cpufreq", 179 179 .owner = THIS_MODULE,
+1 -1
arch/blackfin/mach-common/entry.S
··· 277 277 p5.h = hi(ILAT); 278 278 r6 = [p5]; 279 279 r7 = 0x20; /* Did I just cause anther HW error? */ 280 - r7 = r7 & r1; 280 + r6 = r7 & r6; 281 281 CC = R7 == R6; 282 282 if CC JUMP _double_fault; 283 283 #endif
+4 -4
arch/blackfin/mm/sram-alloc.c
··· 183 183 return; 184 184 } 185 185 186 - free_l2_sram_head.next->paddr = (void *)L2_START + 187 - (_etext_l2 - _stext_l2) + (_edata_l2 - _sdata_l2); 188 - free_l2_sram_head.next->size = L2_LENGTH - 189 - (_etext_l2 - _stext_l2) + (_edata_l2 - _sdata_l2); 186 + free_l2_sram_head.next->paddr = 187 + (void *)L2_START + (_ebss_l2 - _stext_l2); 188 + free_l2_sram_head.next->size = 189 + L2_LENGTH - (_ebss_l2 - _stext_l2); 190 190 free_l2_sram_head.next->pid = 0; 191 191 free_l2_sram_head.next->next = NULL; 192 192
+1 -1
arch/ia64/include/asm/intrinsics.h
··· 226 226 /************************************************/ 227 227 #define ia64_ssm IA64_INTRINSIC_MACRO(ssm) 228 228 #define ia64_rsm IA64_INTRINSIC_MACRO(rsm) 229 - #define ia64_getreg IA64_INTRINSIC_API(getreg) 229 + #define ia64_getreg IA64_INTRINSIC_MACRO(getreg) 230 230 #define ia64_setreg IA64_INTRINSIC_API(setreg) 231 231 #define ia64_set_rr IA64_INTRINSIC_API(set_rr) 232 232 #define ia64_get_rr IA64_INTRINSIC_API(get_rr)
+13
arch/ia64/include/asm/paravirt_privop.h
··· 78 78 ia64_native_rsm(mask); \ 79 79 } while (0) 80 80 81 + /* returned ip value should be the one in the caller, 82 + * not in __paravirt_getreg() */ 83 + #define paravirt_getreg(reg) \ 84 + ({ \ 85 + unsigned long res; \ 86 + BUILD_BUG_ON(!__builtin_constant_p(reg)); \ 87 + if ((reg) == _IA64_REG_IP) \ 88 + res = ia64_native_getreg(_IA64_REG_IP); \ 89 + else \ 90 + res = pv_cpu_ops.getreg(reg); \ 91 + res; \ 92 + }) 93 + 81 94 /****************************************************************************** 82 95 * replacement of hand written assembly codes. 83 96 */
+1
arch/ia64/kernel/entry.S
··· 499 499 END(prefetch_stack) 500 500 501 501 GLOBAL_ENTRY(kernel_execve) 502 + rum psr.ac 502 503 mov r15=__NR_execve // put syscall number in place 503 504 break __BREAK_SYSCALL 504 505 br.ret.sptk.many rp
+1 -1
arch/ia64/kernel/head.S
··· 260 260 * Switch into virtual mode: 261 261 */ 262 262 movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \ 263 - |IA64_PSR_DI) 263 + |IA64_PSR_DI|IA64_PSR_AC) 264 264 ;; 265 265 mov cr.ipsr=r16 266 266 movl r17=1f
+1 -1
arch/ia64/kernel/mca.c
··· 1139 1139 return previous_current; 1140 1140 1141 1141 no_mod: 1142 - printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", 1142 + mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", 1143 1143 smp_processor_id(), type, msg); 1144 1144 return previous_current; 1145 1145 }
+1 -1
arch/ia64/kernel/paravirt.c
··· 130 130 unsigned long res = -1; 131 131 switch (regnum) { 132 132 CASE_GET_REG(GP); 133 - CASE_GET_REG(IP); 133 + /*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */ 134 134 CASE_GET_REG(PSR); 135 135 CASE_GET_REG(TP); 136 136 CASE_GET_REG(SP);
-1
arch/ia64/kernel/pci-dma.c
··· 19 19 #include <linux/kernel.h> 20 20 21 21 #include <asm/page.h> 22 - #include <asm/iommu.h> 23 22 24 23 dma_addr_t bad_dma_address __read_mostly; 25 24 EXPORT_SYMBOL(bad_dma_address);
+1 -1
arch/ia64/xen/hypercall.S
··· 58 58 __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) 59 59 60 60 #ifdef CONFIG_IA32_SUPPORT 61 - __HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG) 61 + __HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG) 62 62 __HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8 63 63 #endif /* CONFIG_IA32_SUPPORT */ 64 64
+2
arch/mips/include/asm/mach-rc32434/gpio.h
··· 84 84 extern unsigned get_434_reg(unsigned reg_offs); 85 85 extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask); 86 86 extern unsigned char get_latch_u5(void); 87 + extern void rb532_gpio_set_ilevel(int bit, unsigned gpio); 88 + extern void rb532_gpio_set_istat(int bit, unsigned gpio); 87 89 88 90 #endif /* _RC32434_GPIO_H_ */
+8 -6
arch/mips/include/asm/mach-rc32434/rb.h
··· 40 40 #define BTCS 0x010040 41 41 #define BTCOMPARE 0x010044 42 42 #define GPIOBASE 0x050000 43 - #define GPIOCFG 0x050004 44 - #define GPIOD 0x050008 45 - #define GPIOILEVEL 0x05000C 46 - #define GPIOISTAT 0x050010 47 - #define GPIONMIEN 0x050014 48 - #define IMASK6 0x038038 43 + /* Offsets relative to GPIOBASE */ 44 + #define GPIOFUNC 0x00 45 + #define GPIOCFG 0x04 46 + #define GPIOD 0x08 47 + #define GPIOILEVEL 0x0C 48 + #define GPIOISTAT 0x10 49 + #define GPIONMIEN 0x14 50 + #define IMASK6 0x38 49 51 #define LO_WPX (1 << 0) 50 52 #define LO_ALE (1 << 1) 51 53 #define LO_CLE (1 << 2)
+1 -1
arch/mips/include/asm/time.h
··· 63 63 /* 64 64 * Initialize the count register as a clocksource 65 65 */ 66 - #ifdef CONFIG_CEVT_R4K 66 + #ifdef CONFIG_CSRC_R4K 67 67 extern int init_mips_clocksource(void); 68 68 #else 69 69 static inline int init_mips_clocksource(void)
+1 -1
arch/mips/kernel/csrc-r4k.c
··· 27 27 if (!cpu_has_counter || !mips_hpt_frequency) 28 28 return -ENXIO; 29 29 30 - /* Calclate a somewhat reasonable rating value */ 30 + /* Calculate a somewhat reasonable rating value */ 31 31 clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; 32 32 33 33 clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
+1 -1
arch/mips/mm/sc-ip22.c
··· 161 161 162 162 /* XXX Check with wje if the Indy caches can differenciate between 163 163 writeback + invalidate and just invalidate. */ 164 - struct bcache_ops indy_sc_ops = { 164 + static struct bcache_ops indy_sc_ops = { 165 165 .bc_enable = indy_sc_enable, 166 166 .bc_disable = indy_sc_disable, 167 167 .bc_wback_inv = indy_sc_wback_invalidate,
+3 -3
arch/mips/mti-malta/malta-amon.c
··· 22 22 #include <linux/init.h> 23 23 #include <linux/smp.h> 24 24 25 - #include <asm-mips/addrspace.h> 26 - #include <asm-mips/mips-boards/launch.h> 27 - #include <asm-mips/mipsmtregs.h> 25 + #include <asm/addrspace.h> 26 + #include <asm/mips-boards/launch.h> 27 + #include <asm/mipsmtregs.h> 28 28 29 29 int amon_cpu_avail(int cpu) 30 30 {
+1 -1
arch/mips/rb532/devices.c
··· 118 118 /* Resources and device for NAND */ 119 119 static int rb532_dev_ready(struct mtd_info *mtd) 120 120 { 121 - return readl(IDT434_REG_BASE + GPIOD) & GPIO_RDY; 121 + return gpio_get_value(GPIO_RDY); 122 122 } 123 123 124 124 static void rb532_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+75 -122
arch/mips/rb532/gpio.c
··· 39 39 struct rb532_gpio_chip { 40 40 struct gpio_chip chip; 41 41 void __iomem *regbase; 42 - void (*set_int_level)(struct gpio_chip *chip, unsigned offset, int value); 43 - int (*get_int_level)(struct gpio_chip *chip, unsigned offset); 44 - void (*set_int_status)(struct gpio_chip *chip, unsigned offset, int value); 45 - int (*get_int_status)(struct gpio_chip *chip, unsigned offset); 46 42 }; 47 43 48 44 struct mpmc_device dev3; ··· 107 111 } 108 112 EXPORT_SYMBOL(get_latch_u5); 109 113 114 + /* rb532_set_bit - sanely set a bit 115 + * 116 + * bitval: new value for the bit 117 + * offset: bit index in the 4 byte address range 118 + * ioaddr: 4 byte aligned address being altered 119 + */ 120 + static inline void rb532_set_bit(unsigned bitval, 121 + unsigned offset, void __iomem *ioaddr) 122 + { 123 + unsigned long flags; 124 + u32 val; 125 + 126 + bitval = !!bitval; /* map parameter to {0,1} */ 127 + 128 + local_irq_save(flags); 129 + 130 + val = readl(ioaddr); 131 + val &= ~( ~bitval << offset ); /* unset bit if bitval == 0 */ 132 + val |= ( bitval << offset ); /* set bit if bitval == 1 */ 133 + writel(val, ioaddr); 134 + 135 + local_irq_restore(flags); 136 + } 137 + 138 + /* rb532_get_bit - read a bit 139 + * 140 + * returns the boolean state of the bit, which may be > 1 141 + */ 142 + static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr) 143 + { 144 + return (readl(ioaddr) & (1 << offset)); 145 + } 146 + 110 147 /* 111 148 * Return GPIO level */ 112 149 static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset) 113 150 { 114 - u32 mask = 1 << offset; 115 151 struct rb532_gpio_chip *gpch; 116 152 117 153 gpch = container_of(chip, struct rb532_gpio_chip, chip); 118 - return readl(gpch->regbase + GPIOD) & mask; 154 + return rb532_get_bit(offset, gpch->regbase + GPIOD); 119 155 } 120 156 121 157 /* ··· 156 128 static void rb532_gpio_set(struct gpio_chip *chip, 157 129 unsigned offset, int value) 158 130 { 159 - unsigned long flags; 160 - 
u32 mask = 1 << offset; 161 - u32 tmp; 162 131 struct rb532_gpio_chip *gpch; 163 - void __iomem *gpvr; 164 132 165 133 gpch = container_of(chip, struct rb532_gpio_chip, chip); 166 - gpvr = gpch->regbase + GPIOD; 167 - 168 - local_irq_save(flags); 169 - tmp = readl(gpvr); 170 - if (value) 171 - tmp |= mask; 172 - else 173 - tmp &= ~mask; 174 - writel(tmp, gpvr); 175 - local_irq_restore(flags); 134 + rb532_set_bit(value, offset, gpch->regbase + GPIOD); 176 135 } 177 136 178 137 /* ··· 167 152 */ 168 153 static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 169 154 { 170 - unsigned long flags; 171 - u32 mask = 1 << offset; 172 - u32 value; 173 155 struct rb532_gpio_chip *gpch; 174 - void __iomem *gpdr; 175 156 176 157 gpch = container_of(chip, struct rb532_gpio_chip, chip); 177 - gpdr = gpch->regbase + GPIOCFG; 178 158 179 - local_irq_save(flags); 180 - value = readl(gpdr); 181 - value &= ~mask; 182 - writel(value, gpdr); 183 - local_irq_restore(flags); 159 + if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC)) 160 + return 1; /* alternate function, GPIOCFG is ignored */ 184 161 162 + rb532_set_bit(0, offset, gpch->regbase + GPIOCFG); 185 163 return 0; 186 164 } 187 165 ··· 184 176 static int rb532_gpio_direction_output(struct gpio_chip *chip, 185 177 unsigned offset, int value) 186 178 { 187 - unsigned long flags; 188 - u32 mask = 1 << offset; 189 - u32 tmp; 190 179 struct rb532_gpio_chip *gpch; 191 - void __iomem *gpdr; 192 180 193 181 gpch = container_of(chip, struct rb532_gpio_chip, chip); 194 - writel(mask, gpch->regbase + GPIOD); 195 - gpdr = gpch->regbase + GPIOCFG; 196 182 197 - local_irq_save(flags); 198 - tmp = readl(gpdr); 199 - tmp |= mask; 200 - writel(tmp, gpdr); 201 - local_irq_restore(flags); 183 + if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC)) 184 + return 1; /* alternate function, GPIOCFG is ignored */ 202 185 186 + /* set the initial output value */ 187 + rb532_set_bit(value, offset, gpch->regbase + GPIOD); 188 + 189 + 
rb532_set_bit(1, offset, gpch->regbase + GPIOCFG); 203 190 return 0; 204 - } 205 - 206 - /* 207 - * Set the GPIO interrupt level 208 - */ 209 - static void rb532_gpio_set_int_level(struct gpio_chip *chip, 210 - unsigned offset, int value) 211 - { 212 - unsigned long flags; 213 - u32 mask = 1 << offset; 214 - u32 tmp; 215 - struct rb532_gpio_chip *gpch; 216 - void __iomem *gpil; 217 - 218 - gpch = container_of(chip, struct rb532_gpio_chip, chip); 219 - gpil = gpch->regbase + GPIOILEVEL; 220 - 221 - local_irq_save(flags); 222 - tmp = readl(gpil); 223 - if (value) 224 - tmp |= mask; 225 - else 226 - tmp &= ~mask; 227 - writel(tmp, gpil); 228 - local_irq_restore(flags); 229 - } 230 - 231 - /* 232 - * Get the GPIO interrupt level 233 - */ 234 - static int rb532_gpio_get_int_level(struct gpio_chip *chip, unsigned offset) 235 - { 236 - u32 mask = 1 << offset; 237 - struct rb532_gpio_chip *gpch; 238 - 239 - gpch = container_of(chip, struct rb532_gpio_chip, chip); 240 - return readl(gpch->regbase + GPIOILEVEL) & mask; 241 - } 242 - 243 - /* 244 - * Set the GPIO interrupt status 245 - */ 246 - static void rb532_gpio_set_int_status(struct gpio_chip *chip, 247 - unsigned offset, int value) 248 - { 249 - unsigned long flags; 250 - u32 mask = 1 << offset; 251 - u32 tmp; 252 - struct rb532_gpio_chip *gpch; 253 - void __iomem *gpis; 254 - 255 - gpch = container_of(chip, struct rb532_gpio_chip, chip); 256 - gpis = gpch->regbase + GPIOISTAT; 257 - 258 - local_irq_save(flags); 259 - tmp = readl(gpis); 260 - if (value) 261 - tmp |= mask; 262 - else 263 - tmp &= ~mask; 264 - writel(tmp, gpis); 265 - local_irq_restore(flags); 266 - } 267 - 268 - /* 269 - * Get the GPIO interrupt status 270 - */ 271 - static int rb532_gpio_get_int_status(struct gpio_chip *chip, unsigned offset) 272 - { 273 - u32 mask = 1 << offset; 274 - struct rb532_gpio_chip *gpch; 275 - 276 - gpch = container_of(chip, struct rb532_gpio_chip, chip); 277 - return readl(gpch->regbase + GPIOISTAT) & mask; 278 191 } 279 
192 280 193 static struct rb532_gpio_chip rb532_gpio_chip[] = { ··· 209 280 .base = 0, 210 281 .ngpio = 32, 211 282 }, 212 - .get_int_level = rb532_gpio_get_int_level, 213 - .set_int_level = rb532_gpio_set_int_level, 214 - .get_int_status = rb532_gpio_get_int_status, 215 - .set_int_status = rb532_gpio_set_int_status, 216 283 }, 217 284 }; 285 + 286 + /* 287 + * Set GPIO interrupt level 288 + */ 289 + void rb532_gpio_set_ilevel(int bit, unsigned gpio) 290 + { 291 + rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOILEVEL); 292 + } 293 + EXPORT_SYMBOL(rb532_gpio_set_ilevel); 294 + 295 + /* 296 + * Set GPIO interrupt status 297 + */ 298 + void rb532_gpio_set_istat(int bit, unsigned gpio) 299 + { 300 + rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOISTAT); 301 + } 302 + EXPORT_SYMBOL(rb532_gpio_set_istat); 303 + 304 + /* 305 + * Configure GPIO alternate function 306 + */ 307 + static void rb532_gpio_set_func(int bit, unsigned gpio) 308 + { 309 + rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC); 310 + } 218 311 219 312 int __init rb532_gpio_init(void) 220 313 { ··· 261 310 return -ENXIO; 262 311 } 263 312 264 - /* Set the interrupt status and level for the CF pin */ 265 - rb532_gpio_set_int_level(&rb532_gpio_chip->chip, CF_GPIO_NUM, 1); 266 - rb532_gpio_set_int_status(&rb532_gpio_chip->chip, CF_GPIO_NUM, 0); 313 + /* configure CF_GPIO_NUM as CFRDY IRQ source */ 314 + rb532_gpio_set_func(0, CF_GPIO_NUM); 315 + rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM); 316 + rb532_gpio_set_ilevel(1, CF_GPIO_NUM); 317 + rb532_gpio_set_istat(0, CF_GPIO_NUM); 267 318 268 319 return 0; 269 320 }
+5 -5
arch/parisc/kernel/ptrace.c
··· 183 183 * being 64 bit in both cases. 184 184 */ 185 185 186 - static long translate_usr_offset(long offset) 186 + static compat_ulong_t translate_usr_offset(compat_ulong_t offset) 187 187 { 188 188 if (offset < 0) 189 - return -1; 189 + return sizeof(struct pt_regs); 190 190 else if (offset <= 32*4) /* gr[0..31] */ 191 191 return offset * 2 + 4; 192 192 else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */ ··· 194 194 else if (offset < sizeof(struct pt_regs)/2 + 32*4) 195 195 return offset * 2 + 4 - 32*8; 196 196 else 197 - return -1; 197 + return sizeof(struct pt_regs); 198 198 } 199 199 200 200 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, ··· 209 209 if (addr & (sizeof(compat_uint_t)-1)) 210 210 break; 211 211 addr = translate_usr_offset(addr); 212 - if (addr < 0) 212 + if (addr >= sizeof(struct pt_regs)) 213 213 break; 214 214 215 215 tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr); ··· 236 236 if (addr & (sizeof(compat_uint_t)-1)) 237 237 break; 238 238 addr = translate_usr_offset(addr); 239 - if (addr < 0) 239 + if (addr >= sizeof(struct pt_regs)) 240 240 break; 241 241 if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { 242 242 /* Special case, fp regs are 64 bits anyway */
+5 -6
arch/sparc/include/asm/termbits.h
··· 29 29 tcflag_t c_cflag; /* control mode flags */ 30 30 tcflag_t c_lflag; /* local mode flags */ 31 31 cc_t c_line; /* line discipline */ 32 + #ifndef __KERNEL__ 32 33 cc_t c_cc[NCCS]; /* control characters */ 33 - #ifdef __KERNEL__ 34 + #else 35 + cc_t c_cc[NCCS+2]; /* kernel needs 2 more to hold vmin/vtime */ 34 36 #define SIZEOF_USER_TERMIOS sizeof (struct termios) - (2*sizeof (cc_t)) 35 - cc_t _x_cc[2]; /* We need them to hold vmin/vtime */ 36 37 #endif 37 38 }; 38 39 ··· 43 42 tcflag_t c_cflag; /* control mode flags */ 44 43 tcflag_t c_lflag; /* local mode flags */ 45 44 cc_t c_line; /* line discipline */ 46 - cc_t c_cc[NCCS]; /* control characters */ 47 - cc_t _x_cc[2]; /* padding to match ktermios */ 45 + cc_t c_cc[NCCS+2]; /* control characters */ 48 46 speed_t c_ispeed; /* input speed */ 49 47 speed_t c_ospeed; /* output speed */ 50 48 }; ··· 54 54 tcflag_t c_cflag; /* control mode flags */ 55 55 tcflag_t c_lflag; /* local mode flags */ 56 56 cc_t c_line; /* line discipline */ 57 - cc_t c_cc[NCCS]; /* control characters */ 58 - cc_t _x_cc[2]; /* We need them to hold vmin/vtime */ 57 + cc_t c_cc[NCCS+2]; /* control characters */ 59 58 speed_t c_ispeed; /* input speed */ 60 59 speed_t c_ospeed; /* output speed */ 61 60 };
+2 -1
arch/sparc/include/asm/unistd_32.h
··· 338 338 #define __NR_dup3 320 339 339 #define __NR_pipe2 321 340 340 #define __NR_inotify_init1 322 341 + #define __NR_accept4 323 341 342 342 - #define NR_SYSCALLS 323 343 + #define NR_SYSCALLS 324 343 344 344 345 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 345 346 * it never had the plain ones and there is no value to adding those
+2 -1
arch/sparc/include/asm/unistd_64.h
··· 340 340 #define __NR_dup3 320 341 341 #define __NR_pipe2 321 342 342 #define __NR_inotify_init1 322 343 + #define __NR_accept4 323 343 344 344 - #define NR_SYSCALLS 323 345 + #define NR_SYSCALLS 324 345 346 346 347 #ifdef __KERNEL__ 347 348 #define __ARCH_WANT_IPC_PARSE_VERSION
+2 -2
arch/sparc/kernel/of_device.c
··· 563 563 op->dev.parent = parent; 564 564 op->dev.bus = &of_platform_bus_type; 565 565 if (!parent) 566 - strcpy(op->dev.bus_id, "root"); 566 + dev_set_name(&op->dev, "root"); 567 567 else 568 - sprintf(op->dev.bus_id, "%08x", dp->node); 568 + dev_set_name(&op->dev, "%08x", dp->node); 569 569 570 570 if (of_device_register(op)) { 571 571 printk("%s: Could not register of device.\n",
+1 -1
arch/sparc/kernel/systbls.S
··· 81 81 /*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait 82 82 /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 83 83 /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 84 - /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1 84 + /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
+12 -1
arch/sparc64/kernel/sys32.S
··· 150 150 sys32_socketcall: /* %o0=call, %o1=args */ 151 151 cmp %o0, 1 152 152 bl,pn %xcc, do_einval 153 - cmp %o0, 17 153 + cmp %o0, 18 154 154 bg,pn %xcc, do_einval 155 155 sub %o0, 1, %o0 156 156 sllx %o0, 5, %o0 ··· 319 319 nop 320 320 nop 321 321 nop 322 + do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */ 323 + 63: ldswa [%o1 + 0x0] %asi, %o0 324 + sethi %hi(sys_accept4), %g1 325 + 64: lduwa [%o1 + 0x8] %asi, %o2 326 + 65: ldswa [%o1 + 0xc] %asi, %o3 327 + jmpl %g1 + %lo(sys_accept4), %g0 328 + 66: lduwa [%o1 + 0x4] %asi, %o1 329 + nop 330 + nop 322 331 323 332 .section __ex_table,"a" 324 333 .align 4 ··· 362 353 .word 57b, __retl_efault, 58b, __retl_efault 363 354 .word 59b, __retl_efault, 60b, __retl_efault 364 355 .word 61b, __retl_efault, 62b, __retl_efault 356 + .word 63b, __retl_efault, 64b, __retl_efault 357 + .word 65b, __retl_efault, 66b, __retl_efault 365 358 .previous
+2 -2
arch/sparc64/kernel/systbls.S
··· 82 82 .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait 83 83 /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate 84 84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 85 - /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1 85 + /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4 86 86 87 87 #endif /* CONFIG_COMPAT */ 88 88 ··· 156 156 .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait 157 157 /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 158 158 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 159 - /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1 159 + /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
+1 -1
arch/x86/Kconfig
··· 952 952 config NUMA 953 953 bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)" 954 954 depends on SMP 955 - depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN) 955 + depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL) 956 956 default n if X86_PC 957 957 default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP) 958 958 help
+4
arch/x86/include/asm/mmzone_32.h
··· 34 34 35 35 extern int early_pfn_to_nid(unsigned long pfn); 36 36 37 + extern void resume_map_numa_kva(pgd_t *pgd); 38 + 37 39 #else /* !CONFIG_NUMA */ 38 40 39 41 #define get_memcfg_numa get_memcfg_numa_flat 42 + 43 + static inline void resume_map_numa_kva(pgd_t *pgd) {} 40 44 41 45 #endif /* CONFIG_NUMA */ 42 46
+1 -1
arch/x86/include/asm/uaccess_64.h
··· 46 46 return ret; 47 47 case 10: 48 48 __get_user_asm(*(u64 *)dst, (u64 __user *)src, 49 - ret, "q", "", "=r", 16); 49 + ret, "q", "", "=r", 10); 50 50 if (unlikely(ret)) 51 51 return ret; 52 52 __get_user_asm(*(u16 *)(8 + (char *)dst),
+2 -2
arch/x86/include/asm/unistd_64.h
··· 639 639 __SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 640 640 #define __NR_timerfd_gettime 287 641 641 __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 642 - #define __NR_paccept 288 643 - __SYSCALL(__NR_paccept, sys_paccept) 642 + #define __NR_accept4 288 643 + __SYSCALL(__NR_accept4, sys_accept4) 644 644 #define __NR_signalfd4 289 645 645 __SYSCALL(__NR_signalfd4, sys_signalfd4) 646 646 #define __NR_eventfd2 290
+1
arch/x86/kernel/Makefile
··· 12 12 CFLAGS_REMOVE_rtc.o = -pg 13 13 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 14 14 CFLAGS_REMOVE_ftrace.o = -pg 15 + CFLAGS_REMOVE_early_printk.o = -pg 15 16 endif 16 17 17 18 #
+1 -1
arch/x86/kernel/amd_iommu.c
··· 537 537 address >>= PAGE_SHIFT; 538 538 iommu_area_free(dom->bitmap, address, pages); 539 539 540 - if (address + pages >= dom->next_bit) 540 + if (address >= dom->next_bit) 541 541 dom->need_flush = true; 542 542 } 543 543
+4 -2
arch/x86/kernel/amd_iommu_init.c
··· 121 121 LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 122 122 we find in ACPI */ 123 123 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */ 124 - int amd_iommu_isolate; /* if 1, device isolation is enabled */ 124 + int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */ 125 125 bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 126 126 127 127 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the ··· 1213 1213 for (; *str; ++str) { 1214 1214 if (strncmp(str, "isolate", 7) == 0) 1215 1215 amd_iommu_isolate = 1; 1216 - if (strncmp(str, "fullflush", 11) == 0) 1216 + if (strncmp(str, "share", 5) == 0) 1217 + amd_iommu_isolate = 0; 1218 + if (strncmp(str, "fullflush", 9) == 0) 1217 1219 amd_iommu_unmap_flush = true; 1218 1220 } 1219 1221
+1
arch/x86/kernel/entry_32.S
··· 847 847 push %eax 848 848 CFI_ADJUST_CFA_OFFSET 4 849 849 call do_exit 850 + ud2 # padding for call trace 850 851 CFI_ENDPROC 851 852 ENDPROC(kernel_thread_helper) 852 853
+3
arch/x86/kernel/entry_64.S
··· 373 373 call schedule_tail 374 374 GET_THREAD_INFO(%rcx) 375 375 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) 376 + CFI_REMEMBER_STATE 376 377 jnz rff_trace 377 378 rff_action: 378 379 RESTORE_REST ··· 383 382 jnz int_ret_from_sys_call 384 383 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET 385 384 jmp ret_from_sys_call 385 + CFI_RESTORE_STATE 386 386 rff_trace: 387 387 movq %rsp,%rdi 388 388 call syscall_trace_leave ··· 1175 1173 # exit 1176 1174 mov %eax, %edi 1177 1175 call do_exit 1176 + ud2 # padding for call trace 1178 1177 CFI_ENDPROC 1179 1178 END(child_rip) 1180 1179
+14
arch/x86/kernel/io_apic.c
··· 1140 1140 1141 1141 cfg->vector = 0; 1142 1142 cpus_clear(cfg->domain); 1143 + 1144 + if (likely(!cfg->move_in_progress)) 1145 + return; 1146 + cpus_and(mask, cfg->old_domain, cpu_online_map); 1147 + for_each_cpu_mask_nr(cpu, mask) { 1148 + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1149 + vector++) { 1150 + if (per_cpu(vector_irq, cpu)[vector] != irq) 1151 + continue; 1152 + per_cpu(vector_irq, cpu)[vector] = -1; 1153 + break; 1154 + } 1155 + } 1156 + cfg->move_in_progress = 0; 1143 1157 } 1144 1158 1145 1159 void __setup_vector_irq(int cpu)
+9 -13
arch/x86/kernel/irq_64.c
··· 18 18 #include <asm/idle.h> 19 19 #include <asm/smp.h> 20 20 21 - #ifdef CONFIG_DEBUG_STACKOVERFLOW 22 21 /* 23 22 * Probabilistic stack overflow check: 24 23 * ··· 27 28 */ 28 29 static inline void stack_overflow_check(struct pt_regs *regs) 29 30 { 31 + #ifdef CONFIG_DEBUG_STACKOVERFLOW 30 32 u64 curbase = (u64)task_stack_page(current); 31 - static unsigned long warned = -60*HZ; 32 33 33 - if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE && 34 - regs->sp < curbase + sizeof(struct thread_info) + 128 && 35 - time_after(jiffies, warned + 60*HZ)) { 36 - printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n", 37 - current->comm, curbase, regs->sp); 38 - show_stack(NULL,NULL); 39 - warned = jiffies; 40 - } 41 - } 34 + WARN_ONCE(regs->sp >= curbase && 35 + regs->sp <= curbase + THREAD_SIZE && 36 + regs->sp < curbase + sizeof(struct thread_info) + 37 + sizeof(struct pt_regs) + 128, 38 + 39 + "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n", 40 + current->comm, curbase, regs->sp); 42 41 #endif 42 + } 43 43 44 44 /* 45 45 * do_IRQ handles all normal device IRQ's (the special ··· 58 60 irq_enter(); 59 61 irq = __get_cpu_var(vector_irq)[vector]; 60 62 61 - #ifdef CONFIG_DEBUG_STACKOVERFLOW 62 63 stack_overflow_check(regs); 63 - #endif 64 64 65 65 desc = irq_to_desc(irq); 66 66 if (likely(desc))
+9
arch/x86/kernel/reboot.c
··· 169 169 DMI_MATCH(DMI_BOARD_NAME, "0KW626"), 170 170 }, 171 171 }, 172 + { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */ 173 + .callback = set_bios_reboot, 174 + .ident = "Dell OptiPlex 330", 175 + .matches = { 176 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 177 + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"), 178 + DMI_MATCH(DMI_BOARD_NAME, "0KP561"), 179 + }, 180 + }, 172 181 { /* Handle problems with rebooting on Dell 2400's */ 173 182 .callback = set_bios_reboot, 174 183 .ident = "Dell PowerEdge 2400",
+1 -1
arch/x86/kernel/setup.c
··· 764 764 .callback = dmi_low_memory_corruption, 765 765 .ident = "Phoenix BIOS", 766 766 .matches = { 767 - DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), 767 + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"), 768 768 }, 769 769 }, 770 770 #endif
+2
arch/x86/kernel/time_64.c
··· 80 80 break; 81 81 no_ctr_free = (i == 4); 82 82 if (no_ctr_free) { 83 + WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... " 84 + "cpu_khz value may be incorrect.\n"); 83 85 i = 3; 84 86 rdmsrl(MSR_K7_EVNTSEL3, evntsel3); 85 87 wrmsrl(MSR_K7_EVNTSEL3, 0);
+35
arch/x86/mm/numa_32.c
··· 222 222 } 223 223 } 224 224 225 + #ifdef CONFIG_HIBERNATION 226 + /** 227 + * resume_map_numa_kva - add KVA mapping to the temporary page tables created 228 + * during resume from hibernation 229 + * @pgd_base - temporary resume page directory 230 + */ 231 + void resume_map_numa_kva(pgd_t *pgd_base) 232 + { 233 + int node; 234 + 235 + for_each_online_node(node) { 236 + unsigned long start_va, start_pfn, size, pfn; 237 + 238 + start_va = (unsigned long)node_remap_start_vaddr[node]; 239 + start_pfn = node_remap_start_pfn[node]; 240 + size = node_remap_size[node]; 241 + 242 + printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node); 243 + 244 + for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) { 245 + unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); 246 + pgd_t *pgd = pgd_base + pgd_index(vaddr); 247 + pud_t *pud = pud_offset(pgd, vaddr); 248 + pmd_t *pmd = pmd_offset(pud, vaddr); 249 + 250 + set_pmd(pmd, pfn_pmd(start_pfn + pfn, 251 + PAGE_KERNEL_LARGE_EXEC)); 252 + 253 + printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", 254 + __FUNCTION__, vaddr, start_pfn + pfn); 255 + } 256 + } 257 + } 258 + #endif 259 + 225 260 static unsigned long calculate_numa_remap_pages(void) 226 261 { 227 262 int nid;
+4
arch/x86/power/hibernate_32.c
··· 12 12 #include <asm/system.h> 13 13 #include <asm/page.h> 14 14 #include <asm/pgtable.h> 15 + #include <asm/mmzone.h> 15 16 16 17 /* Defined in hibernate_asm_32.S */ 17 18 extern int restore_image(void); ··· 128 127 } 129 128 } 130 129 } 130 + 131 + resume_map_numa_kva(pgd_base); 132 + 131 133 return 0; 132 134 } 133 135
+6
block/blk-map.c
··· 217 217 return PTR_ERR(bio); 218 218 219 219 if (bio->bi_size != len) { 220 + /* 221 + * Grab an extra reference to this bio, as bio_unmap_user() 222 + * expects to be able to drop it twice as it happens on the 223 + * normal IO completion path 224 + */ 225 + bio_get(bio); 220 226 bio_endio(bio, 0); 221 227 bio_unmap_user(bio); 222 228 return -EINVAL;
+2
block/genhd.c
··· 768 768 bdev_map = kobj_map_init(base_probe, &block_class_lock); 769 769 blk_dev_init(); 770 770 771 + register_blkdev(BLOCK_EXT_MAJOR, "blkext"); 772 + 771 773 #ifndef CONFIG_SYSFS_DEPRECATED 772 774 /* create top-level block dir */ 773 775 block_depr = kobject_create_and_add("block", NULL);
+3 -4
block/ioctl.c
··· 18 18 struct disk_part_iter piter; 19 19 long long start, length; 20 20 int partno; 21 - int err; 22 21 23 22 if (!capable(CAP_SYS_ADMIN)) 24 23 return -EACCES; ··· 60 61 disk_part_iter_exit(&piter); 61 62 62 63 /* all seems OK */ 63 - err = add_partition(disk, partno, start, length, 64 - ADDPART_FLAG_NONE); 64 + part = add_partition(disk, partno, start, length, 65 + ADDPART_FLAG_NONE); 65 66 mutex_unlock(&bdev->bd_mutex); 66 - return err; 67 + return IS_ERR(part) ? PTR_ERR(part) : 0; 67 68 case BLKPG_DEL_PARTITION: 68 69 part = disk_get_part(disk, partno); 69 70 if (!part)
+1 -1
drivers/acpi/sleep/proc.c
··· 366 366 dev->wakeup.state.enabled ? "enabled" : "disabled"); 367 367 if (ldev) 368 368 seq_printf(seq, "%s:%s", 369 - dev_name(ldev) ? ldev->bus->name : "no-bus", 369 + ldev->bus ? ldev->bus->name : "no-bus", 370 370 dev_name(ldev)); 371 371 seq_printf(seq, "\n"); 372 372 put_device(ldev);
+5 -4
drivers/block/cciss.c
··· 2847 2847 h->maxSG = seg; 2848 2848 2849 2849 #ifdef CCISS_DEBUG 2850 - printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", 2850 + printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n", 2851 2851 creq->nr_sectors, seg); 2852 2852 #endif /* CCISS_DEBUG */ 2853 2853 ··· 3197 3197 3198 3198 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */ 3199 3199 #ifdef CCISS_DEBUG 3200 - printk("address 0 = %x\n", c->paddr); 3200 + printk("address 0 = %lx\n", c->paddr); 3201 3201 #endif /* CCISS_DEBUG */ 3202 3202 c->vaddr = remap_pci_mem(c->paddr, 0x250); 3203 3203 ··· 3224 3224 #endif /* CCISS_DEBUG */ 3225 3225 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); 3226 3226 #ifdef CCISS_DEBUG 3227 - printk("cfg base address index = %x\n", cfg_base_addr_index); 3227 + printk("cfg base address index = %llx\n", 3228 + (unsigned long long)cfg_base_addr_index); 3228 3229 #endif /* CCISS_DEBUG */ 3229 3230 if (cfg_base_addr_index == -1) { 3230 3231 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); ··· 3235 3234 3236 3235 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); 3237 3236 #ifdef CCISS_DEBUG 3238 - printk("cfg offset = %x\n", cfg_offset); 3237 + printk("cfg offset = %llx\n", (unsigned long long)cfg_offset); 3239 3238 #endif /* CCISS_DEBUG */ 3240 3239 c->cfgtable = remap_pci_mem(pci_resource_start(pdev, 3241 3240 cfg_base_addr_index) +
+7 -1
drivers/block/xen-blkfront.c
··· 338 338 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 339 339 { 340 340 struct request_queue *rq; 341 + elevator_t *old_e; 341 342 342 343 rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 343 344 if (rq == NULL) 344 345 return -1; 345 346 346 - elevator_init(rq, "noop"); 347 + old_e = rq->elevator; 348 + if (IS_ERR_VALUE(elevator_init(rq, "noop"))) 349 + printk(KERN_WARNING 350 + "blkfront: Switch elevator failed, use default\n"); 351 + else 352 + elevator_exit(old_e); 347 353 348 354 /* Hard sector size and max sectors impersonate the equiv. hardware. */ 349 355 blk_queue_hardsect_size(rq, sector_size);
+1 -1
drivers/gpio/gpiolib.c
··· 1134 1134 continue; 1135 1135 1136 1136 is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); 1137 - seq_printf(s, " gpio-%-3d (%-12s) %s %s", 1137 + seq_printf(s, " gpio-%-3d (%-20.20s) %s %s", 1138 1138 gpio, gdesc->label, 1139 1139 is_out ? "out" : "in ", 1140 1140 chip->get
+9 -2
drivers/hid/hid-apple.c
··· 55 55 56 56 static struct apple_key_translation apple_fn_keys[] = { 57 57 { KEY_BACKSPACE, KEY_DELETE }, 58 + { KEY_ENTER, KEY_INSERT }, 58 59 { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY }, 59 60 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, 60 - { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */ 61 - { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */ 61 + { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY }, 62 + { KEY_F4, KEY_DASHBOARD, APPLE_FLAG_FKEY }, 62 63 { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY }, 63 64 { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY }, 64 65 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, ··· 418 417 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO), 419 418 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 420 419 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS), 420 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 421 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI), 422 + .driver_data = APPLE_HAS_FN }, 423 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO), 424 + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 425 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), 421 426 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 422 427 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY), 423 428 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+9 -3
drivers/hid/hid-core.c
··· 1250 1250 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) }, 1251 1251 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) }, 1252 1252 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) }, 1253 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, 1254 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, 1255 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, 1253 1256 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1254 1257 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1255 - { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, 1256 1258 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1257 1259 { HID_USB_DEVICE(USB_VENDOR_ID_BRIGHT, USB_DEVICE_ID_BRIGHT_ABNT2) }, 1258 1260 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, ··· 1267 1265 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, 1268 1266 { HID_USB_DEVICE(USB_VENDOR_ID_GENERIC_13BA, USB_DEVICE_ID_GENERIC_13BA_KBD_MOUSE) }, 1269 1267 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 1270 - { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) }, 1271 1268 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, 1272 1269 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1273 1270 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, ··· 1410 1409 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, 1411 1410 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)}, 1412 1411 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)}, 1412 + { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, 1413 1413 { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, 
USB_DEVICE_ID_BERKSHIRE_PCWD) }, 1414 1414 { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, 1415 1415 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, ··· 1488 1486 { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) }, 1489 1487 { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) }, 1490 1488 { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) }, 1489 + { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) }, 1491 1490 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) }, 1492 1491 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) }, 1493 1492 { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, ··· 1576 1573 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) }, 1577 1574 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) }, 1578 1575 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) }, 1576 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, 1577 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, 1578 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, 1579 1579 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1580 1580 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1581 1581 { } ··· 1736 1730 goto err_bus; 1737 1731 1738 1732 #ifdef CONFIG_HID_COMPAT 1739 - hid_compat_wq = create_workqueue("hid_compat"); 1733 + hid_compat_wq = create_singlethread_workqueue("hid_compat"); 1740 1734 if (!hid_compat_wq) { 1741 1735 hidraw_exit(); 1742 1736 goto err;
+3
drivers/hid/hid-ids.h
··· 82 82 #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 83 83 #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 84 84 #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 85 + #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 86 + #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 87 + #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 85 88 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 86 89 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 87 90 #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
+16 -14
drivers/hid/hidraw.c
··· 38 38 static struct cdev hidraw_cdev; 39 39 static struct class *hidraw_class; 40 40 static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; 41 - static DEFINE_SPINLOCK(minors_lock); 41 + static DEFINE_MUTEX(minors_lock); 42 42 43 43 static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) 44 44 { ··· 159 159 struct hidraw_list *list; 160 160 int err = 0; 161 161 162 - lock_kernel(); 163 162 if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) { 164 163 err = -ENOMEM; 165 164 goto out; 166 165 } 167 166 168 - spin_lock(&minors_lock); 167 + lock_kernel(); 168 + mutex_lock(&minors_lock); 169 169 if (!hidraw_table[minor]) { 170 170 printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n", 171 171 minor); ··· 180 180 file->private_data = list; 181 181 182 182 dev = hidraw_table[minor]; 183 - if (!dev->open++) 184 - dev->hid->ll_driver->open(dev->hid); 183 + if (!dev->open++) { 184 + err = dev->hid->ll_driver->open(dev->hid); 185 + if (err < 0) 186 + dev->open--; 187 + } 185 188 186 189 out_unlock: 187 - spin_unlock(&minors_lock); 188 - out: 190 + mutex_unlock(&minors_lock); 189 191 unlock_kernel(); 192 + out: 190 193 return err; 191 194 192 195 } ··· 313 310 314 311 result = -EINVAL; 315 312 316 - spin_lock(&minors_lock); 313 + mutex_lock(&minors_lock); 317 314 318 315 for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) { 319 316 if (hidraw_table[minor]) ··· 323 320 break; 324 321 } 325 322 326 - spin_unlock(&minors_lock); 327 - 328 323 if (result) { 324 + mutex_unlock(&minors_lock); 329 325 kfree(dev); 330 326 goto out; 331 327 } ··· 333 331 NULL, "%s%d", "hidraw", minor); 334 332 335 333 if (IS_ERR(dev->dev)) { 336 - spin_lock(&minors_lock); 337 334 hidraw_table[minor] = NULL; 338 - spin_unlock(&minors_lock); 335 + mutex_unlock(&minors_lock); 339 336 result = PTR_ERR(dev->dev); 340 337 kfree(dev); 341 338 goto out; 342 339 } 343 340 341 + mutex_unlock(&minors_lock); 344 342 init_waitqueue_head(&dev->wait); 
345 343 INIT_LIST_HEAD(&dev->list); 346 344 ··· 362 360 363 361 hidraw->exist = 0; 364 362 365 - spin_lock(&minors_lock); 363 + mutex_lock(&minors_lock); 366 364 hidraw_table[hidraw->minor] = NULL; 367 - spin_unlock(&minors_lock); 365 + mutex_unlock(&minors_lock); 368 366 369 367 device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 370 368
+19 -6
drivers/hid/usbhid/hid-core.c
··· 781 781 unsigned int n, insize = 0; 782 782 int ret; 783 783 784 + clear_bit(HID_DISCONNECTED, &usbhid->iofl); 785 + 784 786 usbhid->bufsize = HID_MIN_BUFFER_SIZE; 785 787 hid_find_max_report(hid, HID_INPUT_REPORT, &usbhid->bufsize); 786 788 hid_find_max_report(hid, HID_OUTPUT_REPORT, &usbhid->bufsize); ··· 849 847 } 850 848 } 851 849 852 - if (!usbhid->urbin) { 853 - err_hid("couldn't find an input interrupt endpoint"); 854 - ret = -ENODEV; 855 - goto fail; 856 - } 857 - 858 850 init_waitqueue_head(&usbhid->wait); 859 851 INIT_WORK(&usbhid->reset_work, hid_reset); 860 852 setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid); ··· 884 888 usb_free_urb(usbhid->urbin); 885 889 usb_free_urb(usbhid->urbout); 886 890 usb_free_urb(usbhid->urbctrl); 891 + usbhid->urbin = NULL; 892 + usbhid->urbout = NULL; 893 + usbhid->urbctrl = NULL; 887 894 hid_free_buffers(dev, hid); 888 895 mutex_unlock(&usbhid->setup); 889 896 return ret; ··· 923 924 usb_free_urb(usbhid->urbin); 924 925 usb_free_urb(usbhid->urbctrl); 925 926 usb_free_urb(usbhid->urbout); 927 + usbhid->urbin = NULL; /* don't mess up next start */ 928 + usbhid->urbctrl = NULL; 929 + usbhid->urbout = NULL; 926 930 927 931 hid_free_buffers(hid_to_usb_dev(hid), hid); 928 932 mutex_unlock(&usbhid->setup); ··· 942 940 943 941 static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id) 944 942 { 943 + struct usb_host_interface *interface = intf->cur_altsetting; 945 944 struct usb_device *dev = interface_to_usbdev(intf); 946 945 struct usbhid_device *usbhid; 947 946 struct hid_device *hid; 947 + unsigned int n, has_in = 0; 948 948 size_t len; 949 949 int ret; 950 950 951 951 dbg_hid("HID probe called for ifnum %d\n", 952 952 intf->altsetting->desc.bInterfaceNumber); 953 + 954 + for (n = 0; n < interface->desc.bNumEndpoints; n++) 955 + if (usb_endpoint_is_int_in(&interface->endpoint[n].desc)) 956 + has_in++; 957 + if (!has_in) { 958 + dev_err(&intf->dev, "couldn't find an input 
interrupt " 959 + "endpoint\n"); 960 + return -ENODEV; 961 + } 953 962 954 963 hid = hid_allocate_device(); 955 964 if (IS_ERR(hid))
+13
drivers/hwmon/applesmc.c
··· 128 128 /* Set 13: iMac 8,1 */ 129 129 { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P", 130 130 "TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL }, 131 + /* Set 14: iMac 6,1 */ 132 + { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P", 133 + "TO0P", "Tp0P", NULL }, 131 134 }; 132 135 133 136 /* List of keys used to read/write fan speeds */ ··· 1299 1296 { .accelerometer = 1, .light = 1, .temperature_set = 12 }, 1300 1297 /* iMac 8: light sensor only, temperature set 13 */ 1301 1298 { .accelerometer = 0, .light = 0, .temperature_set = 13 }, 1299 + /* iMac 6: light sensor only, temperature set 14 */ 1300 + { .accelerometer = 0, .light = 0, .temperature_set = 14 }, 1302 1301 }; 1303 1302 1304 1303 /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". ··· 1354 1349 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1355 1350 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, 1356 1351 &applesmc_dmi_data[4]}, 1352 + { applesmc_dmi_match, "Apple MacPro", { 1353 + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1354 + DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, 1355 + &applesmc_dmi_data[4]}, 1357 1356 { applesmc_dmi_match, "Apple iMac 8", { 1358 1357 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1359 1358 DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, 1360 1359 &applesmc_dmi_data[13]}, 1360 + { applesmc_dmi_match, "Apple iMac 6", { 1361 + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1362 + DMI_MATCH(DMI_PRODUCT_NAME, "iMac6") }, 1363 + &applesmc_dmi_data[14]}, 1361 1364 { applesmc_dmi_match, "Apple iMac 5", { 1362 1365 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1363 1366 DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },
+1
drivers/ide/ide-cs.c
··· 444 444 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 445 445 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), 446 446 PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), 447 + PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506), 447 448 PCMCIA_DEVICE_NULL, 448 449 }; 449 450 MODULE_DEVICE_TABLE(pcmcia, ide_ids);
+4 -2
drivers/isdn/i4l/isdn_net.c
··· 1641 1641 /* slarp reply, send own ip/netmask; if values are nonsense remote 1642 1642 * should think we are unable to provide it with an address via SLARP */ 1643 1643 p += put_u32(p, CISCO_SLARP_REPLY); 1644 - p += put_u32(p, addr); // address 1645 - p += put_u32(p, mask); // netmask 1644 + *(__be32 *)p = addr; // address 1645 + p += 4; 1646 + *(__be32 *)p = mask; // netmask 1647 + p += 4; 1646 1648 p += put_u16(p, 0); // unused 1647 1649 1648 1650 isdn_net_write_super(lp, skb);
+1 -1
drivers/mfd/da903x.c
··· 267 267 { 268 268 uint8_t v[3]; 269 269 270 - chip->events_mask &= ~events; 270 + chip->events_mask |= events; 271 271 272 272 v[0] = (chip->events_mask & 0xff); 273 273 v[1] = (chip->events_mask >> 8) & 0xff;
+13 -2
drivers/mfd/wm8350-i2c.c
··· 30 30 ret = i2c_master_send(wm8350->i2c_client, &reg, 1); 31 31 if (ret < 0) 32 32 return ret; 33 - return i2c_master_recv(wm8350->i2c_client, dest, bytes); 33 + ret = i2c_master_recv(wm8350->i2c_client, dest, bytes); 34 + if (ret < 0) 35 + return ret; 36 + if (ret != bytes) 37 + return -EIO; 38 + return 0; 34 39 } 35 40 36 41 static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg, ··· 43 38 { 44 39 /* we add 1 byte for device register */ 45 40 u8 msg[(WM8350_MAX_REGISTER << 1) + 1]; 41 + int ret; 46 42 47 43 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1)) 48 44 return -EINVAL; 49 45 50 46 msg[0] = reg; 51 47 memcpy(&msg[1], src, bytes); 52 - return i2c_master_send(wm8350->i2c_client, msg, bytes + 1); 48 + ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1); 49 + if (ret < 0) 50 + return ret; 51 + if (ret != bytes + 1) 52 + return -EIO; 53 + return 0; 53 54 } 54 55 55 56 static int wm8350_i2c_probe(struct i2c_client *i2c,
+4
drivers/misc/sgi-gru/Makefile
··· 1 + ifdef CONFIG_SGI_GRU_DEBUG 2 + EXTRA_CFLAGS += -DDEBUG 3 + endif 4 + 1 5 obj-$(CONFIG_SGI_GRU) := gru.o 2 6 gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o 3 7
-4
drivers/net/atl1e/atl1e_hw.c
··· 163 163 * atl1e_hash_mc_addr 164 164 * purpose 165 165 * set hash value for a multicast address 166 - * hash calcu processing : 167 - * 1. calcu 32bit CRC for multicast address 168 - * 2. reverse crc with MSB to LSB 169 166 */ 170 167 u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) 171 168 { ··· 171 174 int i; 172 175 173 176 crc32 = ether_crc_le(6, mc_addr); 174 - crc32 = ~crc32; 175 177 for (i = 0; i < 32; i++) 176 178 value |= (((crc32 >> i) & 1) << (31 - i)); 177 179
+3 -14
drivers/net/atlx/atl1.c
··· 3404 3404 { 3405 3405 struct atl1_adapter *adapter = netdev_priv(netdev); 3406 3406 3407 - wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; 3407 + wol->supported = WAKE_MAGIC; 3408 3408 wol->wolopts = 0; 3409 - if (adapter->wol & ATLX_WUFC_EX) 3410 - wol->wolopts |= WAKE_UCAST; 3411 - if (adapter->wol & ATLX_WUFC_MC) 3412 - wol->wolopts |= WAKE_MCAST; 3413 - if (adapter->wol & ATLX_WUFC_BC) 3414 - wol->wolopts |= WAKE_BCAST; 3415 3409 if (adapter->wol & ATLX_WUFC_MAG) 3416 3410 wol->wolopts |= WAKE_MAGIC; 3417 3411 return; ··· 3416 3422 { 3417 3423 struct atl1_adapter *adapter = netdev_priv(netdev); 3418 3424 3419 - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 3425 + if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | 3426 + WAKE_ARP | WAKE_MAGICSECURE)) 3420 3427 return -EOPNOTSUPP; 3421 3428 adapter->wol = 0; 3422 - if (wol->wolopts & WAKE_UCAST) 3423 - adapter->wol |= ATLX_WUFC_EX; 3424 - if (wol->wolopts & WAKE_MCAST) 3425 - adapter->wol |= ATLX_WUFC_MC; 3426 - if (wol->wolopts & WAKE_BCAST) 3427 - adapter->wol |= ATLX_WUFC_BC; 3428 3429 if (wol->wolopts & WAKE_MAGIC) 3429 3430 adapter->wol |= ATLX_WUFC_MAG; 3430 3431 return 0;
+5 -3
drivers/net/atlx/atl2.c
··· 1690 1690 1691 1691 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 1692 1692 1693 - err = atl2_request_irq(adapter); 1694 - if (netif_running(netdev) && err) 1695 - return err; 1693 + if (netif_running(netdev)) { 1694 + err = atl2_request_irq(adapter); 1695 + if (err) 1696 + return err; 1697 + } 1696 1698 1697 1699 atl2_reset_hw(&adapter->hw); 1698 1700
+10 -10
drivers/net/e100.c
··· 166 166 167 167 #define DRV_NAME "e100" 168 168 #define DRV_EXT "-NAPI" 169 - #define DRV_VERSION "3.5.23-k4"DRV_EXT 169 + #define DRV_VERSION "3.5.23-k6"DRV_EXT 170 170 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 171 171 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" 172 172 #define PFX DRV_NAME ": " ··· 1804 1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; 1805 1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link); 1806 1806 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, 1807 - sizeof(struct rfd), PCI_DMA_TODEVICE); 1807 + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); 1808 1808 } 1809 1809 1810 1810 return 0; ··· 1823 1823 1824 1824 /* Need to sync before taking a peek at cb_complete bit */ 1825 1825 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, 1826 - sizeof(struct rfd), PCI_DMA_FROMDEVICE); 1826 + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); 1827 1827 rfd_status = le16_to_cpu(rfd->status); 1828 1828 1829 1829 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status); ··· 1850 1850 1851 1851 /* Get data */ 1852 1852 pci_unmap_single(nic->pdev, rx->dma_addr, 1853 - RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1853 + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1854 1854 1855 1855 /* If this buffer has the el bit, but we think the receiver 1856 1856 * is still running, check to see if it really stopped while ··· 1943 1943 new_before_last_rfd->command |= cpu_to_le16(cb_el); 1944 1944 pci_dma_sync_single_for_device(nic->pdev, 1945 1945 new_before_last_rx->dma_addr, sizeof(struct rfd), 1946 - PCI_DMA_TODEVICE); 1946 + PCI_DMA_BIDIRECTIONAL); 1947 1947 1948 1948 /* Now that we have a new stopping point, we can clear the old 1949 1949 * stopping point. 
We must sync twice to get the proper ··· 1951 1951 old_before_last_rfd->command &= ~cpu_to_le16(cb_el); 1952 1952 pci_dma_sync_single_for_device(nic->pdev, 1953 1953 old_before_last_rx->dma_addr, sizeof(struct rfd), 1954 - PCI_DMA_TODEVICE); 1954 + PCI_DMA_BIDIRECTIONAL); 1955 1955 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN); 1956 1956 pci_dma_sync_single_for_device(nic->pdev, 1957 1957 old_before_last_rx->dma_addr, sizeof(struct rfd), 1958 - PCI_DMA_TODEVICE); 1958 + PCI_DMA_BIDIRECTIONAL); 1959 1959 } 1960 1960 1961 1961 if(restart_required) { ··· 1978 1978 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1979 1979 if(rx->skb) { 1980 1980 pci_unmap_single(nic->pdev, rx->dma_addr, 1981 - RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1981 + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1982 1982 dev_kfree_skb(rx->skb); 1983 1983 } 1984 1984 } ··· 2021 2021 before_last->command |= cpu_to_le16(cb_el); 2022 2022 before_last->size = 0; 2023 2023 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, 2024 - sizeof(struct rfd), PCI_DMA_TODEVICE); 2024 + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); 2025 2025 2026 2026 nic->rx_to_use = nic->rx_to_clean = nic->rxs; 2027 2027 nic->ru_running = RU_SUSPENDED; ··· 2222 2222 msleep(10); 2223 2223 2224 2224 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, 2225 - RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 2225 + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 2226 2226 2227 2227 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), 2228 2228 skb->data, ETH_DATA_LEN))
+6 -2
drivers/net/e1000/e1000_ethtool.c
··· 1774 1774 1775 1775 /* this function will set ->supported = 0 and return 1 if wol is not 1776 1776 * supported by this hardware */ 1777 - if (e1000_wol_exclusion(adapter, wol)) 1777 + if (e1000_wol_exclusion(adapter, wol) || 1778 + !device_can_wakeup(&adapter->pdev->dev)) 1778 1779 return; 1779 1780 1780 1781 /* apply any specific unsupported masks here */ ··· 1812 1811 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1813 1812 return -EOPNOTSUPP; 1814 1813 1815 - if (e1000_wol_exclusion(adapter, wol)) 1814 + if (e1000_wol_exclusion(adapter, wol) || 1815 + !device_can_wakeup(&adapter->pdev->dev)) 1816 1816 return wol->wolopts ? -EOPNOTSUPP : 0; 1817 1817 1818 1818 switch (hw->device_id) { ··· 1839 1837 adapter->wol |= E1000_WUFC_BC; 1840 1838 if (wol->wolopts & WAKE_MAGIC) 1841 1839 adapter->wol |= E1000_WUFC_MAG; 1840 + 1841 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1842 1842 1843 1843 return 0; 1844 1844 }
+1
drivers/net/e1000/e1000_main.c
··· 1179 1179 1180 1180 /* initialize the wol settings based on the eeprom settings */ 1181 1181 adapter->wol = adapter->eeprom_wol; 1182 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1182 1183 1183 1184 /* print bus type/speed/width info */ 1184 1185 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+5
drivers/net/e1000e/e1000.h
··· 299 299 unsigned long led_status; 300 300 301 301 unsigned int flags; 302 + unsigned int flags2; 302 303 struct work_struct downshift_task; 303 304 struct work_struct update_phy_task; 304 305 }; ··· 307 306 struct e1000_info { 308 307 enum e1000_mac_type mac; 309 308 unsigned int flags; 309 + unsigned int flags2; 310 310 u32 pba; 311 311 s32 (*get_variants)(struct e1000_adapter *); 312 312 struct e1000_mac_operations *mac_ops; ··· 348 346 #define FLAG_TSO_FORCE (1 << 29) 349 347 #define FLAG_RX_RESTART_NOW (1 << 30) 350 348 #define FLAG_MSI_TEST_FAILED (1 << 31) 349 + 350 + /* CRC Stripping defines */ 351 + #define FLAG2_CRC_STRIPPING (1 << 0) 351 352 352 353 #define E1000_RX_DESC_PS(R, i) \ 353 354 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+6 -2
drivers/net/e1000e/ethtool.c
··· 1713 1713 wol->supported = 0; 1714 1714 wol->wolopts = 0; 1715 1715 1716 - if (!(adapter->flags & FLAG_HAS_WOL)) 1716 + if (!(adapter->flags & FLAG_HAS_WOL) || 1717 + !device_can_wakeup(&adapter->pdev->dev)) 1717 1718 return; 1718 1719 1719 1720 wol->supported = WAKE_UCAST | WAKE_MCAST | ··· 1752 1751 if (wol->wolopts & WAKE_MAGICSECURE) 1753 1752 return -EOPNOTSUPP; 1754 1753 1755 - if (!(adapter->flags & FLAG_HAS_WOL)) 1754 + if (!(adapter->flags & FLAG_HAS_WOL) || 1755 + !device_can_wakeup(&adapter->pdev->dev)) 1756 1756 return wol->wolopts ? -EOPNOTSUPP : 0; 1757 1757 1758 1758 /* these settings will always override what we currently have */ ··· 1771 1769 adapter->wol |= E1000_WUFC_LNKC; 1772 1770 if (wol->wolopts & WAKE_ARP) 1773 1771 adapter->wol |= E1000_WUFC_ARP; 1772 + 1773 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1774 1774 1775 1775 return 0; 1776 1776 }
+23 -2
drivers/net/e1000e/netdev.c
··· 499 499 goto next_desc; 500 500 } 501 501 502 + /* adjust length to remove Ethernet CRC */ 503 + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) 504 + length -= 4; 505 + 502 506 total_rx_bytes += length; 503 507 total_rx_packets++; 504 508 ··· 808 804 pci_dma_sync_single_for_device(pdev, ps_page->dma, 809 805 PAGE_SIZE, PCI_DMA_FROMDEVICE); 810 806 807 + /* remove the CRC */ 808 + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) 809 + l1 -= 4; 810 + 811 811 skb_put(skb, l1); 812 812 goto copydone; 813 813 } /* if */ ··· 832 824 skb->data_len += length; 833 825 skb->truesize += length; 834 826 } 827 + 828 + /* strip the ethernet crc, problem is we're using pages now so 829 + * this whole operation can get a little cpu intensive 830 + */ 831 + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) 832 + pskb_trim(skb, skb->len - 4); 835 833 836 834 copydone: 837 835 total_rx_bytes += skb->len; ··· 2315 2301 else 2316 2302 rctl |= E1000_RCTL_LPE; 2317 2303 2318 - /* Enable hardware CRC frame stripping */ 2319 - rctl |= E1000_RCTL_SECRC; 2304 + /* Some systems expect that the CRC is included in SMBUS traffic. 
The 2305 + * hardware strips the CRC before sending to both SMBUS (BMC) and to 2306 + * host memory when this is enabled 2307 + */ 2308 + if (adapter->flags2 & FLAG2_CRC_STRIPPING) 2309 + rctl |= E1000_RCTL_SECRC; 2320 2310 2321 2311 /* Setup buffer sizes */ 2322 2312 rctl &= ~E1000_RCTL_SZ_4096; ··· 4784 4766 adapter->ei = ei; 4785 4767 adapter->pba = ei->pba; 4786 4768 adapter->flags = ei->flags; 4769 + adapter->flags2 = ei->flags2; 4787 4770 adapter->hw.adapter = adapter; 4788 4771 adapter->hw.mac.type = ei->mac; 4789 4772 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; ··· 4989 4970 4990 4971 /* initialize the wol settings based on the eeprom settings */ 4991 4972 adapter->wol = adapter->eeprom_wol; 4973 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 4992 4974 4993 4975 /* reset the hardware with the new settings */ 4994 4976 e1000e_reset(adapter); ··· 5028 5008 err_sw_init: 5029 5009 if (adapter->hw.flash_address) 5030 5010 iounmap(adapter->hw.flash_address); 5011 + e1000e_reset_interrupt_capability(adapter); 5031 5012 err_flashmap: 5032 5013 iounmap(adapter->hw.hw_addr); 5033 5014 err_ioremap:
+25
drivers/net/e1000e/param.c
··· 151 151 */ 152 152 E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); 153 153 154 + /* 155 + * Enable CRC Stripping 156 + * 157 + * Valid Range: 0, 1 158 + * 159 + * Default Value: 1 (enabled) 160 + */ 161 + E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ 162 + "the CRC"); 163 + 154 164 struct e1000_option { 155 165 enum { enable_option, range_option, list_option } type; 156 166 const char *name; ··· 412 402 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) 413 403 && spd) 414 404 adapter->flags |= FLAG_SMART_POWER_DOWN; 405 + } 406 + } 407 + { /* CRC Stripping */ 408 + const struct e1000_option opt = { 409 + .type = enable_option, 410 + .name = "CRC Stripping", 411 + .err = "defaulting to enabled", 412 + .def = OPTION_ENABLED 413 + }; 414 + 415 + if (num_CrcStripping > bd) { 416 + unsigned int crc_stripping = CrcStripping[bd]; 417 + e1000_validate_option(&crc_stripping, &opt, adapter); 418 + if (crc_stripping == OPTION_ENABLED) 419 + adapter->flags2 |= FLAG2_CRC_STRIPPING; 415 420 } 416 421 } 417 422 { /* Kumeran Lock Loss Workaround */
+8 -7
drivers/net/gianfar.c
··· 1407 1407 if (bdp->status & TXBD_DEF) 1408 1408 dev->stats.collisions++; 1409 1409 1410 + /* Unmap the DMA memory */ 1411 + dma_unmap_single(&priv->dev->dev, bdp->bufPtr, 1412 + bdp->length, DMA_TO_DEVICE); 1413 + 1410 1414 /* Free the sk buffer associated with this TxBD */ 1411 1415 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1412 1416 ··· 1670 1666 1671 1667 skb = priv->rx_skbuff[priv->skb_currx]; 1672 1668 1669 + dma_unmap_single(&priv->dev->dev, bdp->bufPtr, 1670 + priv->rx_buffer_size, DMA_FROM_DEVICE); 1671 + 1673 1672 /* We drop the frame if we failed to allocate a new buffer */ 1674 1673 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 1675 1674 bdp->status & RXBD_ERR)) { ··· 1681 1674 if (unlikely(!newskb)) 1682 1675 newskb = skb; 1683 1676 1684 - if (skb) { 1685 - dma_unmap_single(&priv->dev->dev, 1686 - bdp->bufPtr, 1687 - priv->rx_buffer_size, 1688 - DMA_FROM_DEVICE); 1689 - 1677 + if (skb) 1690 1678 dev_kfree_skb_any(skb); 1691 - } 1692 1679 } else { 1693 1680 /* Increment the number of packets */ 1694 1681 dev->stats.rx_packets++;
+6 -2
drivers/net/igb/igb_ethtool.c
··· 1776 1776 1777 1777 /* this function will set ->supported = 0 and return 1 if wol is not 1778 1778 * supported by this hardware */ 1779 - if (igb_wol_exclusion(adapter, wol)) 1779 + if (igb_wol_exclusion(adapter, wol) || 1780 + !device_can_wakeup(&adapter->pdev->dev)) 1780 1781 return; 1781 1782 1782 1783 /* apply any specific unsupported masks here */ ··· 1806 1805 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1807 1806 return -EOPNOTSUPP; 1808 1807 1809 - if (igb_wol_exclusion(adapter, wol)) 1808 + if (igb_wol_exclusion(adapter, wol) || 1809 + !device_can_wakeup(&adapter->pdev->dev)) 1810 1810 return wol->wolopts ? -EOPNOTSUPP : 0; 1811 1811 1812 1812 switch (hw->device_id) { ··· 1826 1824 adapter->wol |= E1000_WUFC_BC; 1827 1825 if (wol->wolopts & WAKE_MAGIC) 1828 1826 adapter->wol |= E1000_WUFC_MAG; 1827 + 1828 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1829 1829 1830 1830 return 0; 1831 1831 }
+4 -4
drivers/net/igb/igb_main.c
··· 1019 1019 state &= ~PCIE_LINK_STATE_L0S; 1020 1020 pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL, 1021 1021 state); 1022 - printk(KERN_INFO "Disabling ASPM L0s upstream switch " 1023 - "port %x:%x.%x\n", us_dev->bus->number, 1024 - PCI_SLOT(us_dev->devfn), 1025 - PCI_FUNC(us_dev->devfn)); 1022 + dev_info(&pdev->dev, 1023 + "Disabling ASPM L0s upstream switch port %s\n", 1024 + pci_name(us_dev)); 1026 1025 } 1027 1026 default: 1028 1027 break; ··· 1243 1244 1244 1245 /* initialize the wol settings based on the eeprom settings */ 1245 1246 adapter->wol = adapter->eeprom_wol; 1247 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1246 1248 1247 1249 /* reset the hardware with the new settings */ 1248 1250 igb_reset(adapter);
+4 -4
drivers/net/ipg.c
··· 1112 1112 struct ipg_rx *rxfd = sp->rxd + entry; 1113 1113 1114 1114 pci_unmap_single(sp->pdev, 1115 - le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1115 + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1116 1116 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1117 1117 dev_kfree_skb_irq(sp->rx_buff[entry]); 1118 1118 sp->rx_buff[entry] = NULL; ··· 1179 1179 */ 1180 1180 if (sp->rx_buff[entry]) { 1181 1181 pci_unmap_single(sp->pdev, 1182 - le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1182 + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1183 1183 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1184 1184 1185 1185 dev_kfree_skb_irq(sp->rx_buff[entry]); ··· 1246 1246 if (jumbo->found_start) 1247 1247 dev_kfree_skb_irq(jumbo->skb); 1248 1248 1249 - pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), 1249 + pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1250 1250 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1251 1251 1252 1252 skb_put(skb, sp->rxfrag_size); ··· 1349 1349 unsigned int entry = curr % IPG_RFDLIST_LENGTH; 1350 1350 struct ipg_rx *rxfd = sp->rxd + entry; 1351 1351 1352 - if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE))) 1352 + if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE))) 1353 1353 break; 1354 1354 1355 1355 switch (ipg_nic_rx_check_frame_type(dev)) {
+28 -30
drivers/net/ixgbe/ixgbe_main.c
··· 1287 1287 return; 1288 1288 } 1289 1289 1290 - static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter); 1290 + /** 1291 + * ixgbe_irq_disable - Mask off interrupt generation on the NIC 1292 + * @adapter: board private structure 1293 + **/ 1294 + static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 1295 + { 1296 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1297 + IXGBE_WRITE_FLUSH(&adapter->hw); 1298 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1299 + int i; 1300 + for (i = 0; i < adapter->num_msix_vectors; i++) 1301 + synchronize_irq(adapter->msix_entries[i].vector); 1302 + } else { 1303 + synchronize_irq(adapter->pdev->irq); 1304 + } 1305 + } 1306 + 1307 + /** 1308 + * ixgbe_irq_enable - Enable default interrupt generation settings 1309 + * @adapter: board private structure 1310 + **/ 1311 + static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) 1312 + { 1313 + u32 mask; 1314 + mask = IXGBE_EIMS_ENABLE_MASK; 1315 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1316 + IXGBE_WRITE_FLUSH(&adapter->hw); 1317 + } 1291 1318 1292 1319 /** 1293 1320 * ixgbe_intr - legacy mode Interrupt Handler ··· 1418 1391 } else { 1419 1392 free_irq(adapter->pdev->irq, netdev); 1420 1393 } 1421 - } 1422 - 1423 - /** 1424 - * ixgbe_irq_disable - Mask off interrupt generation on the NIC 1425 - * @adapter: board private structure 1426 - **/ 1427 - static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 1428 - { 1429 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1430 - IXGBE_WRITE_FLUSH(&adapter->hw); 1431 - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1432 - int i; 1433 - for (i = 0; i < adapter->num_msix_vectors; i++) 1434 - synchronize_irq(adapter->msix_entries[i].vector); 1435 - } else { 1436 - synchronize_irq(adapter->pdev->irq); 1437 - } 1438 - } 1439 - 1440 - /** 1441 - * ixgbe_irq_enable - Enable default interrupt generation settings 1442 - * @adapter: board private structure 1443 - **/ 1444 - static inline void 
ixgbe_irq_enable(struct ixgbe_adapter *adapter) 1445 - { 1446 - u32 mask; 1447 - mask = IXGBE_EIMS_ENABLE_MASK; 1448 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1449 - IXGBE_WRITE_FLUSH(&adapter->hw); 1450 1394 } 1451 1395 1452 1396 /**
+10 -11
drivers/net/jme.c
··· 912 912 skb_put(skb, framesize); 913 913 skb->protocol = eth_type_trans(skb, jme->dev); 914 914 915 - if (jme_rxsum_ok(jme, rxdesc->descwb.flags)) 915 + if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 916 916 skb->ip_summed = CHECKSUM_UNNECESSARY; 917 917 else 918 918 skb->ip_summed = CHECKSUM_NONE; 919 919 920 - if (rxdesc->descwb.flags & RXWBFLAG_TAGON) { 920 + if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 921 921 if (jme->vlgrp) { 922 922 jme->jme_vlan_rx(skb, jme->vlgrp, 923 - le32_to_cpu(rxdesc->descwb.vlan)); 923 + le16_to_cpu(rxdesc->descwb.vlan)); 924 924 NET_STAT(jme).rx_bytes += 4; 925 925 } 926 926 } else { 927 927 jme->jme_rx(skb); 928 928 } 929 929 930 - if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) == 931 - RXWBFLAG_DEST_MUL) 930 + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == 931 + cpu_to_le16(RXWBFLAG_DEST_MUL)) 932 932 ++(NET_STAT(jme).multicast); 933 933 934 934 jme->dev->last_rx = jiffies; ··· 961 961 rxdesc = rxring->desc; 962 962 rxdesc += i; 963 963 964 - if ((rxdesc->descwb.flags & RXWBFLAG_OWN) || 964 + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || 965 965 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 966 966 goto out; 967 967 ··· 1763 1763 } 1764 1764 1765 1765 static int 1766 - jme_tx_tso(struct sk_buff *skb, 1767 - u16 *mss, u8 *flags) 1766 + jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 1768 1767 { 1769 - *mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT; 1768 + *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); 1770 1769 if (*mss) { 1771 1770 *flags |= TXFLAG_LSEN; 1772 1771 ··· 1825 1826 } 1826 1827 1827 1828 static inline void 1828 - jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags) 1829 + jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) 1829 1830 { 1830 1831 if (vlan_tx_tag_present(skb)) { 1831 1832 *flags |= TXFLAG_TAGON; 1832 - *vlan = vlan_tx_tag_get(skb); 1833 + *vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 1833 1834 } 1834 1835 } 
1835 1836
+4 -4
drivers/net/mlx4/en_netdev.c
··· 656 656 /* Configure port */ 657 657 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 658 658 priv->rx_skb_size + ETH_FCS_LEN, 659 - mdev->profile.tx_pause, 660 - mdev->profile.tx_ppp, 661 - mdev->profile.rx_pause, 662 - mdev->profile.rx_ppp); 659 + priv->prof->tx_pause, 660 + priv->prof->tx_ppp, 661 + priv->prof->rx_pause, 662 + priv->prof->rx_ppp); 663 663 if (err) { 664 664 mlx4_err(mdev, "Failed setting port general configurations" 665 665 " for port %d, with error %d\n", priv->port, err);
+16 -14
drivers/net/mlx4/en_params.c
··· 90 90 int mlx4_en_get_profile(struct mlx4_en_dev *mdev) 91 91 { 92 92 struct mlx4_en_profile *params = &mdev->profile; 93 + int i; 93 94 94 95 params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF); 95 96 params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF); ··· 98 97 params->rss_xor = (rss_xor != 0); 99 98 params->rss_mask = rss_mask & 0x1f; 100 99 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS); 101 - params->rx_pause = pprx; 102 - params->rx_ppp = pfcrx; 103 - params->tx_pause = pptx; 104 - params->tx_ppp = pfctx; 105 - if (params->rx_ppp || params->tx_ppp) { 100 + for (i = 1; i <= MLX4_MAX_PORTS; i++) { 101 + params->prof[i].rx_pause = pprx; 102 + params->prof[i].rx_ppp = pfcrx; 103 + params->prof[i].tx_pause = pptx; 104 + params->prof[i].tx_ppp = pfctx; 105 + } 106 + if (pfcrx || pfctx) { 106 107 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM; 107 108 params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM; 108 109 } else { ··· 410 407 struct mlx4_en_dev *mdev = priv->mdev; 411 408 int err; 412 409 413 - mdev->profile.tx_pause = pause->tx_pause != 0; 414 - mdev->profile.rx_pause = pause->rx_pause != 0; 410 + priv->prof->tx_pause = pause->tx_pause != 0; 411 + priv->prof->rx_pause = pause->rx_pause != 0; 415 412 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 416 413 priv->rx_skb_size + ETH_FCS_LEN, 417 - mdev->profile.tx_pause, 418 - mdev->profile.tx_ppp, 419 - mdev->profile.rx_pause, 420 - mdev->profile.rx_ppp); 414 + priv->prof->tx_pause, 415 + priv->prof->tx_ppp, 416 + priv->prof->rx_pause, 417 + priv->prof->rx_ppp); 421 418 if (err) 422 419 mlx4_err(mdev, "Failed setting pause params to\n"); 423 420 ··· 428 425 struct ethtool_pauseparam *pause) 429 426 { 430 427 struct mlx4_en_priv *priv = netdev_priv(dev); 431 - struct mlx4_en_dev *mdev = priv->mdev; 432 428 433 - pause->tx_pause = mdev->profile.tx_pause; 434 - pause->rx_pause = mdev->profile.rx_pause; 429 + pause->tx_pause = priv->prof->tx_pause; 430 + 
pause->rx_pause = priv->prof->rx_pause; 435 431 } 436 432 437 433 static void mlx4_en_get_ringparam(struct net_device *dev,
+4 -4
drivers/net/mlx4/mlx4_en.h
··· 322 322 u32 rx_ring_num; 323 323 u32 tx_ring_size; 324 324 u32 rx_ring_size; 325 + u8 rx_pause; 326 + u8 rx_ppp; 327 + u8 tx_pause; 328 + u8 tx_ppp; 325 329 }; 326 330 327 331 struct mlx4_en_profile { ··· 337 333 int rx_moder_cnt; 338 334 int rx_moder_time; 339 335 int auto_moder; 340 - u8 rx_pause; 341 - u8 rx_ppp; 342 - u8 tx_pause; 343 - u8 tx_ppp; 344 336 u8 no_reset; 345 337 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; 346 338 };
+3 -2
drivers/net/mv643xx_eth.c
··· 899 899 if (skb != NULL) { 900 900 if (skb_queue_len(&mp->rx_recycle) < 901 901 mp->default_rx_ring_size && 902 - skb_recycle_check(skb, mp->skb_size)) 902 + skb_recycle_check(skb, mp->skb_size + 903 + dma_get_cache_alignment() - 1)) 903 904 __skb_queue_head(&mp->rx_recycle, skb); 904 905 else 905 906 dev_kfree_skb(skb); ··· 2436 2435 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2437 2436 2438 2437 if (pd == NULL || pd->shared_smi == NULL) { 2439 - mdiobus_free(msp->smi_bus); 2440 2438 mdiobus_unregister(msp->smi_bus); 2439 + mdiobus_free(msp->smi_bus); 2441 2440 } 2442 2441 if (msp->err_interrupt != NO_IRQ) 2443 2442 free_irq(msp->err_interrupt, msp);
+282 -8
drivers/net/niu.c
··· 33 33 34 34 #define DRV_MODULE_NAME "niu" 35 35 #define PFX DRV_MODULE_NAME ": " 36 - #define DRV_MODULE_VERSION "0.9" 37 - #define DRV_MODULE_RELDATE "May 4, 2008" 36 + #define DRV_MODULE_VERSION "1.0" 37 + #define DRV_MODULE_RELDATE "Nov 14, 2008" 38 38 39 39 static char version[] __devinitdata = 40 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; ··· 406 406 } 407 407 408 408 /* Mode is always 10G fiber. */ 409 - static int serdes_init_niu(struct niu *np) 409 + static int serdes_init_niu_10g_fiber(struct niu *np) 410 410 { 411 411 struct niu_link_config *lp = &np->link_config; 412 412 u32 tx_cfg, rx_cfg; ··· 440 440 return err; 441 441 } 442 442 443 + return 0; 444 + } 445 + 446 + static int serdes_init_niu_1g_serdes(struct niu *np) 447 + { 448 + struct niu_link_config *lp = &np->link_config; 449 + u16 pll_cfg, pll_sts; 450 + int max_retry = 100; 451 + u64 sig, mask, val; 452 + u32 tx_cfg, rx_cfg; 453 + unsigned long i; 454 + int err; 455 + 456 + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | 457 + PLL_TX_CFG_RATE_HALF); 458 + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 459 + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 460 + PLL_RX_CFG_RATE_HALF); 461 + 462 + if (np->port == 0) 463 + rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; 464 + 465 + if (lp->loopback_mode == LOOPBACK_PHY) { 466 + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 467 + 468 + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 469 + ESR2_TI_PLL_TEST_CFG_L, test_cfg); 470 + 471 + tx_cfg |= PLL_TX_CFG_ENTEST; 472 + rx_cfg |= PLL_RX_CFG_ENTEST; 473 + } 474 + 475 + /* Initialize PLL for 1G */ 476 + pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); 477 + 478 + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 479 + ESR2_TI_PLL_CFG_L, pll_cfg); 480 + if (err) { 481 + dev_err(np->device, PFX "NIU Port %d " 482 + "serdes_init_niu_1g_serdes: " 483 + "mdio write to ESR2_TI_PLL_CFG_L failed", np->port); 484 + return err; 485 + } 486 + 487 + pll_sts = PLL_CFG_ENPLL; 488 + 489 + 
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 490 + ESR2_TI_PLL_STS_L, pll_sts); 491 + if (err) { 492 + dev_err(np->device, PFX "NIU Port %d " 493 + "serdes_init_niu_1g_serdes: " 494 + "mdio write to ESR2_TI_PLL_STS_L failed", np->port); 495 + return err; 496 + } 497 + 498 + udelay(200); 499 + 500 + /* Initialize all 4 lanes of the SERDES. */ 501 + for (i = 0; i < 4; i++) { 502 + err = esr2_set_tx_cfg(np, i, tx_cfg); 503 + if (err) 504 + return err; 505 + } 506 + 507 + for (i = 0; i < 4; i++) { 508 + err = esr2_set_rx_cfg(np, i, rx_cfg); 509 + if (err) 510 + return err; 511 + } 512 + 513 + switch (np->port) { 514 + case 0: 515 + val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); 516 + mask = val; 517 + break; 518 + 519 + case 1: 520 + val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); 521 + mask = val; 522 + break; 523 + 524 + default: 525 + return -EINVAL; 526 + } 527 + 528 + while (max_retry--) { 529 + sig = nr64(ESR_INT_SIGNALS); 530 + if ((sig & mask) == val) 531 + break; 532 + 533 + mdelay(500); 534 + } 535 + 536 + if ((sig & mask) != val) { 537 + dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 538 + "[%08x]\n", np->port, (int) (sig & mask), (int) val); 539 + return -ENODEV; 540 + } 541 + 542 + return 0; 543 + } 544 + 545 + static int serdes_init_niu_10g_serdes(struct niu *np) 546 + { 547 + struct niu_link_config *lp = &np->link_config; 548 + u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; 549 + int max_retry = 100; 550 + u64 sig, mask, val; 551 + unsigned long i; 552 + int err; 553 + 554 + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 555 + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 556 + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 557 + PLL_RX_CFG_EQ_LP_ADAPTIVE); 558 + 559 + if (lp->loopback_mode == LOOPBACK_PHY) { 560 + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 561 + 562 + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 563 + ESR2_TI_PLL_TEST_CFG_L, test_cfg); 564 + 565 + tx_cfg |= PLL_TX_CFG_ENTEST; 566 + rx_cfg |= PLL_RX_CFG_ENTEST; 567 
+ } 568 + 569 + /* Initialize PLL for 10G */ 570 + pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); 571 + 572 + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 573 + ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); 574 + if (err) { 575 + dev_err(np->device, PFX "NIU Port %d " 576 + "serdes_init_niu_10g_serdes: " 577 + "mdio write to ESR2_TI_PLL_CFG_L failed", np->port); 578 + return err; 579 + } 580 + 581 + pll_sts = PLL_CFG_ENPLL; 582 + 583 + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 584 + ESR2_TI_PLL_STS_L, pll_sts & 0xffff); 585 + if (err) { 586 + dev_err(np->device, PFX "NIU Port %d " 587 + "serdes_init_niu_10g_serdes: " 588 + "mdio write to ESR2_TI_PLL_STS_L failed", np->port); 589 + return err; 590 + } 591 + 592 + udelay(200); 593 + 594 + /* Initialize all 4 lanes of the SERDES. */ 595 + for (i = 0; i < 4; i++) { 596 + err = esr2_set_tx_cfg(np, i, tx_cfg); 597 + if (err) 598 + return err; 599 + } 600 + 601 + for (i = 0; i < 4; i++) { 602 + err = esr2_set_rx_cfg(np, i, rx_cfg); 603 + if (err) 604 + return err; 605 + } 606 + 607 + /* check if serdes is ready */ 608 + 609 + switch (np->port) { 610 + case 0: 611 + mask = ESR_INT_SIGNALS_P0_BITS; 612 + val = (ESR_INT_SRDY0_P0 | 613 + ESR_INT_DET0_P0 | 614 + ESR_INT_XSRDY_P0 | 615 + ESR_INT_XDP_P0_CH3 | 616 + ESR_INT_XDP_P0_CH2 | 617 + ESR_INT_XDP_P0_CH1 | 618 + ESR_INT_XDP_P0_CH0); 619 + break; 620 + 621 + case 1: 622 + mask = ESR_INT_SIGNALS_P1_BITS; 623 + val = (ESR_INT_SRDY0_P1 | 624 + ESR_INT_DET0_P1 | 625 + ESR_INT_XSRDY_P1 | 626 + ESR_INT_XDP_P1_CH3 | 627 + ESR_INT_XDP_P1_CH2 | 628 + ESR_INT_XDP_P1_CH1 | 629 + ESR_INT_XDP_P1_CH0); 630 + break; 631 + 632 + default: 633 + return -EINVAL; 634 + } 635 + 636 + while (max_retry--) { 637 + sig = nr64(ESR_INT_SIGNALS); 638 + if ((sig & mask) == val) 639 + break; 640 + 641 + mdelay(500); 642 + } 643 + 644 + if ((sig & mask) != val) { 645 + pr_info(PFX "NIU Port %u signal bits [%08x] are not " 646 + "[%08x] for 10G...trying 1G\n", 647 + np->port, (int) (sig & mask), (int) 
val); 648 + 649 + /* 10G failed, try initializing at 1G */ 650 + err = serdes_init_niu_1g_serdes(np); 651 + if (!err) { 652 + np->flags &= ~NIU_FLAGS_10G; 653 + np->mac_xcvr = MAC_XCVR_PCS; 654 + } else { 655 + dev_err(np->device, PFX "Port %u 10G/1G SERDES " 656 + "Link Failed \n", np->port); 657 + return -ENODEV; 658 + } 659 + } 443 660 return 0; 444 661 } 445 662 ··· 2171 1954 .link_status = link_status_10g_serdes, 2172 1955 }; 2173 1956 1957 + static const struct niu_phy_ops phy_ops_10g_serdes_niu = { 1958 + .serdes_init = serdes_init_niu_10g_serdes, 1959 + .link_status = link_status_10g_serdes, 1960 + }; 1961 + 1962 + static const struct niu_phy_ops phy_ops_1g_serdes_niu = { 1963 + .serdes_init = serdes_init_niu_1g_serdes, 1964 + .link_status = link_status_1g_serdes, 1965 + }; 1966 + 2174 1967 static const struct niu_phy_ops phy_ops_1g_rgmii = { 2175 1968 .xcvr_init = xcvr_init_1g_rgmii, 2176 1969 .link_status = link_status_1g_rgmii, 2177 1970 }; 2178 1971 2179 1972 static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 2180 - .serdes_init = serdes_init_niu, 1973 + .serdes_init = serdes_init_niu_10g_fiber, 2181 1974 .xcvr_init = xcvr_init_10g, 2182 1975 .link_status = link_status_10g, 2183 1976 }; ··· 2225 1998 u32 phy_addr_base; 2226 1999 }; 2227 2000 2228 - static const struct niu_phy_template phy_template_niu = { 2001 + static const struct niu_phy_template phy_template_niu_10g_fiber = { 2229 2002 .ops = &phy_ops_10g_fiber_niu, 2230 2003 .phy_addr_base = 16, 2004 + }; 2005 + 2006 + static const struct niu_phy_template phy_template_niu_10g_serdes = { 2007 + .ops = &phy_ops_10g_serdes_niu, 2008 + .phy_addr_base = 0, 2009 + }; 2010 + 2011 + static const struct niu_phy_template phy_template_niu_1g_serdes = { 2012 + .ops = &phy_ops_1g_serdes_niu, 2013 + .phy_addr_base = 0, 2231 2014 }; 2232 2015 2233 2016 static const struct niu_phy_template phy_template_10g_fiber = { ··· 2419 2182 u32 phy_addr_off = 0; 2420 2183 2421 2184 if (plat_type == PLAT_TYPE_NIU) { 
2422 - tp = &phy_template_niu; 2423 - phy_addr_off += np->port; 2185 + switch (np->flags & 2186 + (NIU_FLAGS_10G | 2187 + NIU_FLAGS_FIBER | 2188 + NIU_FLAGS_XCVR_SERDES)) { 2189 + case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2190 + /* 10G Serdes */ 2191 + tp = &phy_template_niu_10g_serdes; 2192 + break; 2193 + case NIU_FLAGS_XCVR_SERDES: 2194 + /* 1G Serdes */ 2195 + tp = &phy_template_niu_1g_serdes; 2196 + break; 2197 + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2198 + /* 10G Fiber */ 2199 + default: 2200 + tp = &phy_template_niu_10g_fiber; 2201 + phy_addr_off += np->port; 2202 + break; 2203 + } 2424 2204 } else { 2425 2205 switch (np->flags & 2426 2206 (NIU_FLAGS_10G | ··· 7467 7213 np->flags |= NIU_FLAGS_10G; 7468 7214 np->flags &= ~NIU_FLAGS_FIBER; 7469 7215 np->mac_xcvr = MAC_XCVR_XPCS; 7216 + } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { 7217 + /* 10G Serdes or 1G Serdes, default to 10G */ 7218 + np->flags |= NIU_FLAGS_10G; 7219 + np->flags &= ~NIU_FLAGS_FIBER; 7220 + np->flags |= NIU_FLAGS_XCVR_SERDES; 7221 + np->mac_xcvr = MAC_XCVR_XPCS; 7470 7222 } else { 7471 7223 return -EINVAL; 7472 7224 } ··· 8001 7741 u32 val; 8002 7742 int err; 8003 7743 7744 + num_10g = num_1g = 0; 7745 + 8004 7746 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8005 7747 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8006 7748 num_10g = 0; ··· 8019 7757 parent->num_ports = 2; 8020 7758 val = (phy_encode(PORT_TYPE_10G, 0) | 8021 7759 phy_encode(PORT_TYPE_10G, 1)); 7760 + } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && 7761 + (parent->plat_type == PLAT_TYPE_NIU)) { 7762 + /* this is the Monza case */ 7763 + if (np->flags & NIU_FLAGS_10G) { 7764 + val = (phy_encode(PORT_TYPE_10G, 0) | 7765 + phy_encode(PORT_TYPE_10G, 1)); 7766 + } else { 7767 + val = (phy_encode(PORT_TYPE_1G, 0) | 7768 + phy_encode(PORT_TYPE_1G, 1)); 7769 + } 8022 7770 } else { 8023 7771 err = fill_phy_probe_info(np, parent, info); 8024 7772 if (err) ··· 8928 8656 dev->name, 8929 8657 (np->flags & 
NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 8930 8658 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 8931 - (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"), 8659 + (np->flags & NIU_FLAGS_FIBER ? "FIBER" : 8660 + (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : 8661 + "COPPER")), 8932 8662 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 8933 8663 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 8934 8664 np->vpd.phy_type);
+13
drivers/net/niu.h
··· 1048 1048 #define PLL_CFG_LD_SHIFT 8 1049 1049 #define PLL_CFG_MPY 0x0000001e 1050 1050 #define PLL_CFG_MPY_SHIFT 1 1051 + #define PLL_CFG_MPY_4X 0x0 1052 + #define PLL_CFG_MPY_5X 0x00000002 1053 + #define PLL_CFG_MPY_6X 0x00000004 1054 + #define PLL_CFG_MPY_8X 0x00000008 1055 + #define PLL_CFG_MPY_10X 0x0000000a 1056 + #define PLL_CFG_MPY_12X 0x0000000c 1057 + #define PLL_CFG_MPY_12P5X 0x0000000e 1051 1058 #define PLL_CFG_ENPLL 0x00000001 1052 1059 1053 1060 #define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002) ··· 1100 1093 #define PLL_TX_CFG_INVPAIR 0x00000080 1101 1094 #define PLL_TX_CFG_RATE 0x00000060 1102 1095 #define PLL_TX_CFG_RATE_SHIFT 5 1096 + #define PLL_TX_CFG_RATE_FULL 0x0 1097 + #define PLL_TX_CFG_RATE_HALF 0x20 1098 + #define PLL_TX_CFG_RATE_QUAD 0x40 1103 1099 #define PLL_TX_CFG_BUSWIDTH 0x0000001c 1104 1100 #define PLL_TX_CFG_BUSWIDTH_SHIFT 2 1105 1101 #define PLL_TX_CFG_ENTEST 0x00000002 ··· 1142 1132 #define PLL_RX_CFG_INVPAIR 0x00000080 1143 1133 #define PLL_RX_CFG_RATE 0x00000060 1144 1134 #define PLL_RX_CFG_RATE_SHIFT 5 1135 + #define PLL_RX_CFG_RATE_FULL 0x0 1136 + #define PLL_RX_CFG_RATE_HALF 0x20 1137 + #define PLL_RX_CFG_RATE_QUAD 0x40 1145 1138 #define PLL_RX_CFG_BUSWIDTH 0x0000001c 1146 1139 #define PLL_RX_CFG_BUSWIDTH_SHIFT 2 1147 1140 #define PLL_RX_CFG_ENTEST 0x00000002
+66
drivers/net/phy/marvell.c
··· 227 227 return 0; 228 228 } 229 229 230 + static int m88e1118_config_aneg(struct phy_device *phydev) 231 + { 232 + int err; 233 + 234 + err = phy_write(phydev, MII_BMCR, BMCR_RESET); 235 + if (err < 0) 236 + return err; 237 + 238 + err = phy_write(phydev, MII_M1011_PHY_SCR, 239 + MII_M1011_PHY_SCR_AUTO_CROSS); 240 + if (err < 0) 241 + return err; 242 + 243 + err = genphy_config_aneg(phydev); 244 + return 0; 245 + } 246 + 247 + static int m88e1118_config_init(struct phy_device *phydev) 248 + { 249 + int err; 250 + 251 + /* Change address */ 252 + err = phy_write(phydev, 0x16, 0x0002); 253 + if (err < 0) 254 + return err; 255 + 256 + /* Enable 1000 Mbit */ 257 + err = phy_write(phydev, 0x15, 0x1070); 258 + if (err < 0) 259 + return err; 260 + 261 + /* Change address */ 262 + err = phy_write(phydev, 0x16, 0x0003); 263 + if (err < 0) 264 + return err; 265 + 266 + /* Adjust LED Control */ 267 + err = phy_write(phydev, 0x10, 0x021e); 268 + if (err < 0) 269 + return err; 270 + 271 + /* Reset address */ 272 + err = phy_write(phydev, 0x16, 0x0); 273 + if (err < 0) 274 + return err; 275 + 276 + err = phy_write(phydev, MII_BMCR, BMCR_RESET); 277 + if (err < 0) 278 + return err; 279 + 280 + return 0; 281 + } 282 + 230 283 static int m88e1145_config_init(struct phy_device *phydev) 231 284 { 232 285 int err; ··· 467 414 .ack_interrupt = &marvell_ack_interrupt, 468 415 .config_intr = &marvell_config_intr, 469 416 .driver = { .owner = THIS_MODULE }, 417 + }, 418 + { 419 + .phy_id = 0x01410e10, 420 + .phy_id_mask = 0xfffffff0, 421 + .name = "Marvell 88E1118", 422 + .features = PHY_GBIT_FEATURES, 423 + .flags = PHY_HAS_INTERRUPT, 424 + .config_init = &m88e1118_config_init, 425 + .config_aneg = &m88e1118_config_aneg, 426 + .read_status = &genphy_read_status, 427 + .ack_interrupt = &marvell_ack_interrupt, 428 + .config_intr = &marvell_config_intr, 429 + .driver = {.owner = THIS_MODULE,}, 470 430 }, 471 431 { 472 432 .phy_id = 0x01410cd0,
+1 -1
drivers/net/phy/mdio_bus.c
··· 136 136 BUG_ON(bus->state != MDIOBUS_REGISTERED); 137 137 bus->state = MDIOBUS_UNREGISTERED; 138 138 139 - device_unregister(&bus->dev); 139 + device_del(&bus->dev); 140 140 for (i = 0; i < PHY_MAX_ADDR; i++) { 141 141 if (bus->phy_map[i]) 142 142 device_unregister(&bus->phy_map[i]->dev);
+25 -13
drivers/net/phy/phy_device.c
··· 227 227 if (r) 228 228 return ERR_PTR(r); 229 229 230 - /* If the phy_id is all Fs, there is no device there */ 231 - if (0xffffffff == phy_id) 230 + /* If the phy_id is all Fs or all 0s, there is no device there */ 231 + if ((0xffff == phy_id) || (0x00 == phy_id)) 232 232 return NULL; 233 233 234 234 dev = phy_device_create(bus, addr, phy_id); ··· 564 564 */ 565 565 int genphy_config_aneg(struct phy_device *phydev) 566 566 { 567 - int result = 0; 567 + int result; 568 568 569 - if (AUTONEG_ENABLE == phydev->autoneg) { 570 - int result = genphy_config_advert(phydev); 569 + if (AUTONEG_ENABLE != phydev->autoneg) 570 + return genphy_setup_forced(phydev); 571 571 572 - if (result < 0) /* error */ 573 - return result; 572 + result = genphy_config_advert(phydev); 574 573 575 - /* Only restart aneg if we are advertising something different 576 - * than we were before. */ 577 - if (result > 0) 578 - result = genphy_restart_aneg(phydev); 579 - } else 580 - result = genphy_setup_forced(phydev); 574 + if (result < 0) /* error */ 575 + return result; 576 + 577 + if (result == 0) { 578 + /* Advertisment hasn't changed, but maybe aneg was never on to 579 + * begin with? Or maybe phy was isolated? */ 580 + int ctl = phy_read(phydev, MII_BMCR); 581 + 582 + if (ctl < 0) 583 + return ctl; 584 + 585 + if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE)) 586 + result = 1; /* do restart aneg */ 587 + } 588 + 589 + /* Only restart aneg if we are advertising something different 590 + * than we were before. */ 591 + if (result > 0) 592 + result = genphy_restart_aneg(phydev); 581 593 582 594 return result; 583 595 }
+5 -14
drivers/net/qla3xxx.c
··· 1515 1515 linkState = LS_UP; 1516 1516 } else { 1517 1517 linkState = LS_DOWN; 1518 - if (netif_msg_link(qdev)) 1519 - printk(KERN_WARNING PFX 1520 - "%s: Link is down.\n", qdev->ndev->name); 1521 1518 } 1522 1519 return linkState; 1523 1520 } ··· 1578 1581 ql_mac_enable(qdev, 1); 1579 1582 } 1580 1583 1581 - if (netif_msg_link(qdev)) 1582 - printk(KERN_DEBUG PFX 1583 - "%s: Change port_link_state LS_DOWN to LS_UP.\n", 1584 - qdev->ndev->name); 1585 1584 qdev->port_link_state = LS_UP; 1586 1585 netif_start_queue(qdev->ndev); 1587 1586 netif_carrier_on(qdev->ndev); ··· 1648 1655 /* Fall Through */ 1649 1656 1650 1657 case LS_DOWN: 1651 - if (netif_msg_link(qdev)) 1652 - printk(KERN_DEBUG PFX 1653 - "%s: port_link_state = LS_DOWN.\n", 1654 - qdev->ndev->name); 1655 1658 if (curr_link_state == LS_UP) { 1656 1659 if (netif_msg_link(qdev)) 1657 - printk(KERN_DEBUG PFX 1658 - "%s: curr_link_state = LS_UP.\n", 1660 + printk(KERN_INFO PFX "%s: Link is up.\n", 1659 1661 qdev->ndev->name); 1660 1662 if (ql_is_auto_neg_complete(qdev)) 1661 1663 ql_finish_auto_neg(qdev); ··· 1658 1670 if (qdev->port_link_state == LS_UP) 1659 1671 ql_link_down_detect_clear(qdev); 1660 1672 1673 + qdev->port_link_state = LS_UP; 1661 1674 } 1662 1675 break; 1663 1676 ··· 1667 1678 * See if the link is currently down or went down and came 1668 1679 * back up 1669 1680 */ 1670 - if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) { 1681 + if (curr_link_state == LS_DOWN) { 1671 1682 if (netif_msg_link(qdev)) 1672 1683 printk(KERN_INFO PFX "%s: Link is down.\n", 1673 1684 qdev->ndev->name); 1674 1685 qdev->port_link_state = LS_DOWN; 1675 1686 } 1687 + if (ql_link_down_detect(qdev)) 1688 + qdev->port_link_state = LS_DOWN; 1676 1689 break; 1677 1690 } 1678 1691 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+2 -2
drivers/net/sh_eth.c
··· 927 927 struct sh_eth_private *mdp = netdev_priv(ndev); 928 928 struct sh_eth_txdesc *txdesc; 929 929 u32 entry; 930 - int flags; 930 + unsigned long flags; 931 931 932 932 spin_lock_irqsave(&mdp->lock, flags); 933 933 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { ··· 1141 1141 /* Hook up MII support for ethtool */ 1142 1142 mdp->mii_bus->name = "sh_mii"; 1143 1143 mdp->mii_bus->parent = &ndev->dev; 1144 - mdp->mii_bus->id[0] = id; 1144 + snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id); 1145 1145 1146 1146 /* PHY IRQ */ 1147 1147 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+1 -1
drivers/net/smc911x.c
··· 1813 1813 val = SMC_GET_BYTE_TEST(lp); 1814 1814 DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); 1815 1815 if (val != 0x87654321) { 1816 - printk(KERN_ERR "Invalid chip endian 0x08%x\n",val); 1816 + printk(KERN_ERR "Invalid chip endian 0x%08x\n",val); 1817 1817 retval = -ENODEV; 1818 1818 goto err_out; 1819 1819 }
+3 -3
drivers/net/ucc_geth_ethtool.c
··· 323 323 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { 324 324 base = (u32 __iomem *)&ugeth->ug_regs->tx64; 325 325 for (i = 0; i < UEC_HW_STATS_LEN; i++) 326 - data[j++] = (u64)in_be32(&base[i]); 326 + data[j++] = in_be32(&base[i]); 327 327 } 328 328 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { 329 329 base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram; 330 330 for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) 331 - data[j++] = (u64)in_be32(&base[i]); 331 + data[j++] = base ? in_be32(&base[i]) : 0; 332 332 } 333 333 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { 334 334 base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram; 335 335 for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) 336 - data[j++] = (u64)in_be32(&base[i]); 336 + data[j++] = base ? in_be32(&base[i]) : 0; 337 337 } 338 338 } 339 339
+3 -1
drivers/net/usb/asix.c
··· 1102 1102 mode = AX88178_MEDIUM_DEFAULT; 1103 1103 1104 1104 if (ecmd.speed == SPEED_1000) 1105 - mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK; 1105 + mode |= AX_MEDIUM_GM; 1106 1106 else if (ecmd.speed == SPEED_100) 1107 1107 mode |= AX_MEDIUM_PS; 1108 1108 else 1109 1109 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM); 1110 + 1111 + mode |= AX_MEDIUM_ENCK; 1110 1112 1111 1113 if (ecmd.duplex == DUPLEX_FULL) 1112 1114 mode |= AX_MEDIUM_FD;
+1 -1
drivers/net/via-velocity.c
··· 2296 2296 } 2297 2297 2298 2298 mac_set_cam_mask(regs, vptr->mCAMmask); 2299 - rx_mode = (RCR_AM | RCR_AB); 2299 + rx_mode = RCR_AM | RCR_AB | RCR_AP; 2300 2300 } 2301 2301 if (dev->mtu > 1500) 2302 2302 rx_mode |= RCR_AL;
+3 -4
drivers/net/wireless/iwlwifi/iwl-agn.c
··· 1384 1384 1385 1385 rxq->queue[i] = NULL; 1386 1386 1387 - pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 1387 + pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->aligned_dma_addr, 1388 1388 priv->hw_params.rx_buf_size, 1389 1389 PCI_DMA_FROMDEVICE); 1390 1390 pkt = (struct iwl_rx_packet *)rxb->skb->data; ··· 1436 1436 rxb->skb = NULL; 1437 1437 } 1438 1438 1439 - pci_unmap_single(priv->pci_dev, rxb->dma_addr, 1440 - priv->hw_params.rx_buf_size, 1439 + pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 1440 + priv->hw_params.rx_buf_size + 256, 1441 1441 PCI_DMA_FROMDEVICE); 1442 1442 spin_lock_irqsave(&rxq->lock, flags); 1443 1443 list_add_tail(&rxb->list, &priv->rxq.rx_used); ··· 2341 2341 mutex_lock(&priv->mutex); 2342 2342 iwl_alive_start(priv); 2343 2343 mutex_unlock(&priv->mutex); 2344 - ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC); 2345 2344 } 2346 2345 2347 2346 static void iwl4965_bg_rf_kill(struct work_struct *work)
+2 -1
drivers/net/wireless/iwlwifi/iwl-dev.h
··· 89 89 #define DEFAULT_LONG_RETRY_LIMIT 4U 90 90 91 91 struct iwl_rx_mem_buffer { 92 - dma_addr_t dma_addr; 92 + dma_addr_t real_dma_addr; 93 + dma_addr_t aligned_dma_addr; 93 94 struct sk_buff *skb; 94 95 struct list_head list; 95 96 };
+17 -9
drivers/net/wireless/iwlwifi/iwl-rx.c
··· 204 204 list_del(element); 205 205 206 206 /* Point to Rx buffer via next RBD in circular buffer */ 207 - rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); 207 + rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr); 208 208 rxq->queue[rxq->write] = rxb; 209 209 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 210 210 rxq->free_count--; ··· 251 251 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 252 252 253 253 /* Alloc a new receive buffer */ 254 - rxb->skb = alloc_skb(priv->hw_params.rx_buf_size, 254 + rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256, 255 255 __GFP_NOWARN | GFP_ATOMIC); 256 256 if (!rxb->skb) { 257 257 if (net_ratelimit()) ··· 266 266 list_del(element); 267 267 268 268 /* Get physical address of RB/SKB */ 269 - rxb->dma_addr = 270 - pci_map_single(priv->pci_dev, rxb->skb->data, 271 - priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE); 269 + rxb->real_dma_addr = pci_map_single( 270 + priv->pci_dev, 271 + rxb->skb->data, 272 + priv->hw_params.rx_buf_size + 256, 273 + PCI_DMA_FROMDEVICE); 274 + /* dma address must be no more than 36 bits */ 275 + BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36)); 276 + /* and also 256 byte aligned! 
*/ 277 + rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256); 278 + skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr); 279 + 272 280 list_add_tail(&rxb->list, &rxq->rx_free); 273 281 rxq->free_count++; 274 282 } ··· 308 300 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 309 301 if (rxq->pool[i].skb != NULL) { 310 302 pci_unmap_single(priv->pci_dev, 311 - rxq->pool[i].dma_addr, 312 - priv->hw_params.rx_buf_size, 303 + rxq->pool[i].real_dma_addr, 304 + priv->hw_params.rx_buf_size + 256, 313 305 PCI_DMA_FROMDEVICE); 314 306 dev_kfree_skb(rxq->pool[i].skb); 315 307 } ··· 362 354 * to an SKB, so we need to unmap and free potential storage */ 363 355 if (rxq->pool[i].skb != NULL) { 364 356 pci_unmap_single(priv->pci_dev, 365 - rxq->pool[i].dma_addr, 366 - priv->hw_params.rx_buf_size, 357 + rxq->pool[i].real_dma_addr, 358 + priv->hw_params.rx_buf_size + 256, 367 359 PCI_DMA_FROMDEVICE); 368 360 priv->alloc_rxb_skb--; 369 361 dev_kfree_skb(rxq->pool[i].skb);
-1
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 6012 6012 mutex_lock(&priv->mutex); 6013 6013 iwl3945_alive_start(priv); 6014 6014 mutex_unlock(&priv->mutex); 6015 - ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC); 6016 6015 } 6017 6016 6018 6017 static void iwl3945_bg_rf_kill(struct work_struct *work)
+1 -1
drivers/net/wireless/libertas_tf/if_usb.c
··· 331 331 /* Fill the receive configuration URB and initialise the Rx call back */ 332 332 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, 333 333 usb_rcvbulkpipe(cardp->udev, cardp->ep_in), 334 - (void *) (skb->tail), 334 + skb_tail_pointer(skb), 335 335 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); 336 336 337 337 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
+1 -1
drivers/parport/Kconfig
··· 36 36 config PARPORT_PC 37 37 tristate "PC-style hardware" 38 38 depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \ 39 - (!M68K || ISA) && !MN10300 && !AVR32 39 + (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN 40 40 ---help--- 41 41 You should say Y here if you have a PC-style parallel port. All 42 42 IBM PC compatible computers and some Alphas have PC-style
+4 -2
drivers/pci/intel-iommu.c
··· 1655 1655 iommu->flush.flush_context = __iommu_flush_context; 1656 1656 iommu->flush.flush_iotlb = __iommu_flush_iotlb; 1657 1657 printk(KERN_INFO "IOMMU 0x%Lx: using Register based " 1658 - "invalidation\n", drhd->reg_base_addr); 1658 + "invalidation\n", 1659 + (unsigned long long)drhd->reg_base_addr); 1659 1660 } else { 1660 1661 iommu->flush.flush_context = qi_flush_context; 1661 1662 iommu->flush.flush_iotlb = qi_flush_iotlb; 1662 1663 printk(KERN_INFO "IOMMU 0x%Lx: using Queued " 1663 - "invalidation\n", drhd->reg_base_addr); 1664 + "invalidation\n", 1665 + (unsigned long long)drhd->reg_base_addr); 1664 1666 } 1665 1667 } 1666 1668
+2 -2
drivers/pci/pci.c
··· 1832 1832 if (!(cap & PCI_EXP_DEVCAP_FLR)) 1833 1833 return -ENOTTY; 1834 1834 1835 - if (!dev->msi_enabled && !dev->msix_enabled) 1835 + if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0) 1836 1836 disable_irq(dev->irq); 1837 1837 pci_save_state(dev); 1838 1838 ··· 1841 1841 r = pci_execute_reset_function(dev); 1842 1842 1843 1843 pci_restore_state(dev); 1844 - if (!dev->msi_enabled && !dev->msix_enabled) 1844 + if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0) 1845 1845 enable_irq(dev->irq); 1846 1846 1847 1847 return r;
+2 -1
drivers/pcmcia/cistpl.c
··· 351 351 char *buf; 352 352 353 353 buf = kmalloc(256, GFP_KERNEL); 354 - if (buf == NULL) 354 + if (buf == NULL) { 355 355 dev_printk(KERN_WARNING, &s->dev, 356 356 "no memory for verifying CIS\n"); 357 357 return -ENOMEM; 358 + } 358 359 list_for_each_entry(cis, &s->cis_cache, node) { 359 360 int len = cis->len; 360 361
+7 -7
drivers/pcmcia/cs.c
··· 186 186 187 187 spin_lock_init(&socket->lock); 188 188 189 - if (socket->resource_ops->init) { 190 - ret = socket->resource_ops->init(socket); 191 - if (ret) 192 - return (ret); 193 - } 194 - 195 189 /* try to obtain a socket number [yes, it gets ugly if we 196 190 * register more than 2^sizeof(unsigned int) pcmcia 197 191 * sockets... but the socket number is deprecated ··· 220 226 /* set proper values in socket->dev */ 221 227 dev_set_drvdata(&socket->dev, socket); 222 228 socket->dev.class = &pcmcia_socket_class; 223 - snprintf(socket->dev.bus_id, BUS_ID_SIZE, "pcmcia_socket%u", socket->sock); 229 + dev_set_name(&socket->dev, "pcmcia_socket%u", socket->sock); 224 230 225 231 /* base address = 0, map = 0 */ 226 232 socket->cis_mem.flags = 0; ··· 232 238 init_completion(&socket->thread_done); 233 239 mutex_init(&socket->skt_mutex); 234 240 spin_lock_init(&socket->thread_lock); 241 + 242 + if (socket->resource_ops->init) { 243 + ret = socket->resource_ops->init(socket); 244 + if (ret) 245 + goto err; 246 + } 235 247 236 248 tsk = kthread_run(pccardd, socket, "pccardd"); 237 249 if (IS_ERR(tsk)) {
+6 -5
drivers/pcmcia/ds.c
··· 622 622 { 623 623 struct pcmcia_device *p_dev, *tmp_dev; 624 624 unsigned long flags; 625 - int bus_id_len; 626 625 627 626 s = pcmcia_get_socket(s); 628 627 if (!s) ··· 649 650 /* by default don't allow DMA */ 650 651 p_dev->dma_mask = DMA_MASK_NONE; 651 652 p_dev->dev.dma_mask = &p_dev->dma_mask; 652 - bus_id_len = sprintf (p_dev->dev.bus_id, "%d.%d", p_dev->socket->sock, p_dev->device_no); 653 - 654 - p_dev->devname = kmalloc(6 + bus_id_len + 1, GFP_KERNEL); 653 + dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no); 654 + if (!dev_name(&p_dev->dev)) 655 + goto err_free; 656 + p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); 655 657 if (!p_dev->devname) 656 658 goto err_free; 657 - sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id); 658 659 ds_dev_dbg(3, &p_dev->dev, "devname is %s\n", p_dev->devname); 659 660 660 661 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); ··· 667 668 list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list) 668 669 if (p_dev->func == tmp_dev->func) { 669 670 p_dev->function_config = tmp_dev->function_config; 671 + p_dev->io = tmp_dev->io; 672 + p_dev->irq = tmp_dev->irq; 670 673 kref_get(&p_dev->function_config->ref); 671 674 } 672 675
+2 -1
drivers/pcmcia/pcmcia_resource.c
··· 302 302 /* We only allow changing Vpp1 and Vpp2 to the same value */ 303 303 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 304 304 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 305 - if (mod->Vpp1 != mod->Vpp2) 305 + if (mod->Vpp1 != mod->Vpp2) { 306 306 ds_dbg(s, 0, "Vpp1 and Vpp2 must be the same\n"); 307 307 return -EINVAL; 308 + } 308 309 s->socket.Vpp = mod->Vpp1; 309 310 if (s->ops->set_socket(s, &s->socket)) { 310 311 dev_printk(KERN_WARNING, &s->dev,
+3 -3
drivers/pcmcia/rsrc_nonstatic.c
··· 71 71 ======================================================================*/ 72 72 73 73 static struct resource * 74 - make_resource(resource_size_t b, resource_size_t n, int flags, char *name) 74 + make_resource(resource_size_t b, resource_size_t n, int flags, const char *name) 75 75 { 76 76 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); 77 77 ··· 624 624 static struct resource *nonstatic_find_io_region(unsigned long base, int num, 625 625 unsigned long align, struct pcmcia_socket *s) 626 626 { 627 - struct resource *res = make_resource(0, num, IORESOURCE_IO, s->dev.bus_id); 627 + struct resource *res = make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev)); 628 628 struct socket_data *s_data = s->resource_data; 629 629 struct pcmcia_align_data data; 630 630 unsigned long min = base; ··· 658 658 static struct resource * nonstatic_find_mem_region(u_long base, u_long num, 659 659 u_long align, int low, struct pcmcia_socket *s) 660 660 { 661 - struct resource *res = make_resource(0, num, IORESOURCE_MEM, s->dev.bus_id); 661 + struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev)); 662 662 struct socket_data *s_data = s->resource_data; 663 663 struct pcmcia_align_data data; 664 664 unsigned long min, max;
+19 -50
drivers/rtc/rtc-sun4v.c
··· 1 - /* rtc-sun4c.c: Hypervisor based RTC for SUN4V systems. 1 + /* rtc-sun4v.c: Hypervisor based RTC for SUN4V systems. 2 2 * 3 3 * Copyright (C) 2008 David S. Miller <davem@davemloft.net> 4 4 */ ··· 7 7 #include <linux/module.h> 8 8 #include <linux/delay.h> 9 9 #include <linux/init.h> 10 - #include <linux/time.h> 11 10 #include <linux/rtc.h> 12 11 #include <linux/platform_device.h> 13 12 14 13 #include <asm/hypervisor.h> 15 - 16 - MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); 17 - MODULE_DESCRIPTION("SUN4V RTC driver"); 18 - MODULE_LICENSE("GPL"); 19 - 20 - struct sun4v_rtc { 21 - struct rtc_device *rtc; 22 - spinlock_t lock; 23 - }; 24 14 25 15 static unsigned long hypervisor_get_time(void) 26 16 { ··· 35 45 36 46 static int sun4v_read_time(struct device *dev, struct rtc_time *tm) 37 47 { 38 - struct sun4v_rtc *p = dev_get_drvdata(dev); 39 - unsigned long flags, secs; 40 - 41 - spin_lock_irqsave(&p->lock, flags); 42 - secs = hypervisor_get_time(); 43 - spin_unlock_irqrestore(&p->lock, flags); 44 - 45 - rtc_time_to_tm(secs, tm); 46 - 48 + rtc_time_to_tm(hypervisor_get_time(), tm); 47 49 return 0; 48 50 } 49 51 ··· 62 80 63 81 static int sun4v_set_time(struct device *dev, struct rtc_time *tm) 64 82 { 65 - struct sun4v_rtc *p = dev_get_drvdata(dev); 66 - unsigned long flags, secs; 83 + unsigned long secs; 67 84 int err; 68 85 69 86 err = rtc_tm_to_time(tm, &secs); 70 87 if (err) 71 88 return err; 72 89 73 - spin_lock_irqsave(&p->lock, flags); 74 - err = hypervisor_set_time(secs); 75 - spin_unlock_irqrestore(&p->lock, flags); 76 - 77 - return err; 90 + return hypervisor_set_time(secs); 78 91 } 79 92 80 93 static const struct rtc_class_ops sun4v_rtc_ops = { ··· 77 100 .set_time = sun4v_set_time, 78 101 }; 79 102 80 - static int __devinit sun4v_rtc_probe(struct platform_device *pdev) 103 + static int __init sun4v_rtc_probe(struct platform_device *pdev) 81 104 { 82 - struct sun4v_rtc *p = kzalloc(sizeof(*p), GFP_KERNEL); 83 - 84 - if (!p) 85 - return 
-ENOMEM; 86 - 87 - spin_lock_init(&p->lock); 88 - 89 - p->rtc = rtc_device_register("sun4v", &pdev->dev, 105 + struct rtc_device *rtc = rtc_device_register("sun4v", &pdev->dev, 90 106 &sun4v_rtc_ops, THIS_MODULE); 91 - if (IS_ERR(p->rtc)) { 92 - int err = PTR_ERR(p->rtc); 93 - kfree(p); 94 - return err; 95 - } 96 - platform_set_drvdata(pdev, p); 107 + if (IS_ERR(rtc)) 108 + return PTR_ERR(rtc); 109 + 110 + platform_set_drvdata(pdev, rtc); 97 111 return 0; 98 112 } 99 113 100 - static int __devexit sun4v_rtc_remove(struct platform_device *pdev) 114 + static int __exit sun4v_rtc_remove(struct platform_device *pdev) 101 115 { 102 - struct sun4v_rtc *p = platform_get_drvdata(pdev); 116 + struct rtc_device *rtc = platform_get_drvdata(pdev); 103 117 104 - rtc_device_unregister(p->rtc); 105 - kfree(p); 106 - 118 + rtc_device_unregister(rtc); 107 119 return 0; 108 120 } 109 121 ··· 101 135 .name = "rtc-sun4v", 102 136 .owner = THIS_MODULE, 103 137 }, 104 - .probe = sun4v_rtc_probe, 105 - .remove = __devexit_p(sun4v_rtc_remove), 138 + .remove = __exit_p(sun4v_rtc_remove), 106 139 }; 107 140 108 141 static int __init sun4v_rtc_init(void) 109 142 { 110 - return platform_driver_register(&sun4v_rtc_driver); 143 + return platform_driver_probe(&sun4v_rtc_driver, sun4v_rtc_probe); 111 144 } 112 145 113 146 static void __exit sun4v_rtc_exit(void) ··· 116 151 117 152 module_init(sun4v_rtc_init); 118 153 module_exit(sun4v_rtc_exit); 154 + 155 + MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); 156 + MODULE_DESCRIPTION("SUN4V RTC driver"); 157 + MODULE_LICENSE("GPL");
+12 -12
drivers/spi/pxa2xx_spi.c
··· 352 352 } else 353 353 drv_data->tx_map_len = drv_data->len; 354 354 355 - /* Stream map the rx buffer */ 356 - drv_data->rx_dma = dma_map_single(dev, drv_data->rx, 357 - drv_data->rx_map_len, 358 - DMA_FROM_DEVICE); 359 - if (dma_mapping_error(dev, drv_data->rx_dma)) 355 + /* Stream map the tx buffer. Always do DMA_TO_DEVICE first 356 + * so we flush the cache *before* invalidating it, in case 357 + * the tx and rx buffers overlap. 358 + */ 359 + drv_data->tx_dma = dma_map_single(dev, drv_data->tx, 360 + drv_data->tx_map_len, DMA_TO_DEVICE); 361 + if (dma_mapping_error(dev, drv_data->tx_dma)) 360 362 return 0; 361 363 362 - /* Stream map the tx buffer */ 363 - drv_data->tx_dma = dma_map_single(dev, drv_data->tx, 364 - drv_data->tx_map_len, 365 - DMA_TO_DEVICE); 366 - 367 - if (dma_mapping_error(dev, drv_data->tx_dma)) { 368 - dma_unmap_single(dev, drv_data->rx_dma, 364 + /* Stream map the rx buffer */ 365 + drv_data->rx_dma = dma_map_single(dev, drv_data->rx, 369 366 drv_data->rx_map_len, DMA_FROM_DEVICE); 367 + if (dma_mapping_error(dev, drv_data->rx_dma)) { 368 + dma_unmap_single(dev, drv_data->tx_dma, 369 + drv_data->tx_map_len, DMA_TO_DEVICE); 370 370 return 0; 371 371 } 372 372
+22 -23
drivers/spi/spi_imx.c
··· 506 506 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) 507 507 return -1; 508 508 509 - /* NULL rx means write-only transfer and no map needed 510 - since rx DMA will not be used */ 511 - if (drv_data->rx) { 512 - buf = drv_data->rx; 513 - drv_data->rx_dma = dma_map_single( 514 - dev, 515 - buf, 516 - drv_data->len, 517 - DMA_FROM_DEVICE); 518 - if (dma_mapping_error(dev, drv_data->rx_dma)) 519 - return -1; 520 - drv_data->rx_dma_needs_unmap = 1; 521 - } 522 - 523 509 if (drv_data->tx == NULL) { 524 510 /* Read only message --> use drv_data->dummy_dma_buf for dummy 525 511 writes to achive reads */ ··· 519 533 buf, 520 534 drv_data->tx_map_len, 521 535 DMA_TO_DEVICE); 522 - if (dma_mapping_error(dev, drv_data->tx_dma)) { 523 - if (drv_data->rx_dma) { 524 - dma_unmap_single(dev, 525 - drv_data->rx_dma, 526 - drv_data->len, 527 - DMA_FROM_DEVICE); 528 - drv_data->rx_dma_needs_unmap = 0; 529 - } 536 + if (dma_mapping_error(dev, drv_data->tx_dma)) 530 537 return -1; 531 - } 532 538 drv_data->tx_dma_needs_unmap = 1; 539 + 540 + /* NULL rx means write-only transfer and no map needed 541 + * since rx DMA will not be used */ 542 + if (drv_data->rx) { 543 + buf = drv_data->rx; 544 + drv_data->rx_dma = dma_map_single(dev, 545 + buf, 546 + drv_data->len, 547 + DMA_FROM_DEVICE); 548 + if (dma_mapping_error(dev, drv_data->rx_dma)) { 549 + if (drv_data->tx_dma) { 550 + dma_unmap_single(dev, 551 + drv_data->tx_dma, 552 + drv_data->tx_map_len, 553 + DMA_TO_DEVICE); 554 + drv_data->tx_dma_needs_unmap = 0; 555 + } 556 + return -1; 557 + } 558 + drv_data->rx_dma_needs_unmap = 1; 559 + } 533 560 534 561 return 0; 535 562 }
+1 -2
drivers/usb/gadget/f_rndis.c
··· 172 172 .bDescriptorType = USB_DT_INTERFACE, 173 173 174 174 /* .bInterfaceNumber = DYNAMIC */ 175 - .bAlternateSetting = 1, 176 175 .bNumEndpoints = 2, 177 176 .bInterfaceClass = USB_CLASS_CDC_DATA, 178 177 .bInterfaceSubClass = 0, ··· 302 303 __le32 *data = req->buf; 303 304 int status; 304 305 305 - if (atomic_inc_return(&rndis->notify_count)) 306 + if (atomic_inc_return(&rndis->notify_count) != 1) 306 307 return; 307 308 308 309 /* Send RNDIS RESPONSE_AVAILABLE notification; a
+21
drivers/usb/host/ehci-pci.c
··· 66 66 { 67 67 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 68 68 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 69 + struct pci_dev *p_smbus; 70 + u8 rev; 69 71 u32 temp; 70 72 int retval; 71 73 ··· 166 164 if (tmp & 0x20) 167 165 break; 168 166 pci_write_config_byte(pdev, 0x4b, tmp | 0x20); 167 + } 168 + break; 169 + case PCI_VENDOR_ID_ATI: 170 + /* SB700 old version has a bug in EHCI controller, 171 + * which causes usb devices lose response in some cases. 172 + */ 173 + if (pdev->device == 0x4396) { 174 + p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, 175 + PCI_DEVICE_ID_ATI_SBX00_SMBUS, 176 + NULL); 177 + if (!p_smbus) 178 + break; 179 + rev = p_smbus->revision; 180 + if ((rev == 0x3a) || (rev == 0x3b)) { 181 + u8 tmp; 182 + pci_read_config_byte(pdev, 0x53, &tmp); 183 + pci_write_config_byte(pdev, 0x53, tmp | (1<<3)); 184 + } 185 + pci_dev_put(p_smbus); 169 186 } 170 187 break; 171 188 }
+4 -1
drivers/usb/mon/mon_bin.c
··· 687 687 } 688 688 689 689 if (rp->b_read >= sizeof(struct mon_bin_hdr)) { 690 - step_len = min(nbytes, (size_t)ep->len_cap); 690 + step_len = ep->len_cap; 691 + step_len -= rp->b_read - sizeof(struct mon_bin_hdr); 692 + if (step_len > nbytes) 693 + step_len = nbytes; 691 694 offset = rp->b_out + PKT_SIZE; 692 695 offset += rp->b_read - sizeof(struct mon_bin_hdr); 693 696 if (offset >= rp->b_size)
+1 -1
drivers/usb/musb/musb_host.c
··· 1757 1757 } 1758 1758 } 1759 1759 /* use bulk reserved ep1 if no other ep is free */ 1760 - if (best_end > 0 && qh->type == USB_ENDPOINT_XFER_BULK) { 1760 + if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { 1761 1761 hw_ep = musb->bulk_ep; 1762 1762 if (is_in) 1763 1763 head = &musb->in_bulk;
+1
drivers/usb/serial/cp2101.c
··· 56 56 static int debug; 57 57 58 58 static struct usb_device_id id_table [] = { 59 + { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ 59 60 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 60 61 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 61 62 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
+9 -2
drivers/usb/storage/unusual_devs.h
··· 167 167 US_SC_DEVICE, US_PR_DEVICE, NULL, 168 168 US_FL_FIX_CAPACITY ), 169 169 170 + /* Patch for Nokia 5310 capacity */ 171 + UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591, 172 + "Nokia", 173 + "5310", 174 + US_SC_DEVICE, US_PR_DEVICE, NULL, 175 + US_FL_FIX_CAPACITY ), 176 + 170 177 /* Reported by Mario Rettig <mariorettig@web.de> */ 171 178 UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, 172 179 "Nokia", ··· 240 233 US_FL_MAX_SECTORS_64 ), 241 234 242 235 /* Reported by Cedric Godin <cedric@belbone.be> */ 243 - UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, 236 + UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551, 244 237 "Nokia", 245 238 "5300", 246 239 US_SC_DEVICE, US_PR_DEVICE, NULL, 247 240 US_FL_FIX_CAPACITY ), 248 241 249 242 /* Reported by Richard Nauber <RichardNauber@web.de> */ 250 - UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601, 243 + UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660, 251 244 "Nokia", 252 245 "6300", 253 246 US_SC_DEVICE, US_PR_DEVICE, NULL,
+1 -1
drivers/video/atmel_lcdfb.c
··· 132 132 133 133 bl = backlight_device_register("backlight", &sinfo->pdev->dev, 134 134 sinfo, &atmel_lcdc_bl_ops); 135 - if (IS_ERR(sinfo->backlight)) { 135 + if (IS_ERR(bl)) { 136 136 dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n", 137 137 PTR_ERR(bl)); 138 138 return;
+2
drivers/video/backlight/da903x.c
··· 119 119 default: 120 120 dev_err(&pdev->dev, "invalid backlight device ID(%d)\n", 121 121 pdev->id); 122 + kfree(data); 122 123 return -EINVAL; 123 124 } 124 125 ··· 131 130 data, &da903x_backlight_ops); 132 131 if (IS_ERR(bl)) { 133 132 dev_err(&pdev->dev, "failed to register backlight\n"); 133 + kfree(data); 134 134 return PTR_ERR(bl); 135 135 } 136 136
+7 -4
drivers/video/backlight/lcd.c
··· 42 42 43 43 mutex_lock(&ld->ops_lock); 44 44 if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { 45 - if (event == FB_EVENT_BLANK) 46 - ld->ops->set_power(ld, *(int *)evdata->data); 47 - else 48 - ld->ops->set_mode(ld, evdata->data); 45 + if (event == FB_EVENT_BLANK) { 46 + if (ld->ops->set_power) 47 + ld->ops->set_power(ld, *(int *)evdata->data); 48 + } else { 49 + if (ld->ops->set_mode) 50 + ld->ops->set_mode(ld, evdata->data); 51 + } 49 52 } 50 53 mutex_unlock(&ld->ops_lock); 51 54 return 0;
+1 -2
drivers/video/cirrusfb.c
··· 2462 2462 2463 2463 #ifndef MODULE 2464 2464 static int __init cirrusfb_setup(char *options) { 2465 - char *this_opt, s[32]; 2466 - int i; 2465 + char *this_opt; 2467 2466 2468 2467 DPRINTK("ENTER\n"); 2469 2468
+1 -1
drivers/video/fbmem.c
··· 230 230 greenshift = info->var.green.offset; 231 231 blueshift = info->var.blue.offset; 232 232 233 - for (i = 32; i < logo->clutsize; i++) 233 + for (i = 32; i < 32 + logo->clutsize; i++) 234 234 palette[i] = i << redshift | i << greenshift | i << blueshift; 235 235 } 236 236
+7 -3
drivers/video/tmiofb.c
··· 222 222 unsigned int bbisc = tmio_ioread16(par->lcr + LCR_BBISC); 223 223 224 224 225 + tmio_iowrite16(bbisc, par->lcr + LCR_BBISC); 226 + 227 + #ifdef CONFIG_FB_TMIO_ACCELL 225 228 /* 226 229 * We were in polling mode and now we got correct irq. 227 230 * Switch back to IRQ-based sync of command FIFO ··· 234 231 par->use_polling = false; 235 232 } 236 233 237 - tmio_iowrite16(bbisc, par->lcr + LCR_BBISC); 238 - 239 - #ifdef CONFIG_FB_TMIO_ACCELL 240 234 if (bbisc & 1) 241 235 wake_up(&par->wait_acc); 242 236 #endif ··· 938 938 static int tmiofb_suspend(struct platform_device *dev, pm_message_t state) 939 939 { 940 940 struct fb_info *info = platform_get_drvdata(dev); 941 + #ifdef CONFIG_FB_TMIO_ACCELL 941 942 struct tmiofb_par *par = info->par; 943 + #endif 942 944 struct mfd_cell *cell = dev->dev.platform_data; 943 945 int retval = 0; 944 946 ··· 952 950 info->fbops->fb_sync(info); 953 951 954 952 953 + #ifdef CONFIG_FB_TMIO_ACCELL 955 954 /* 956 955 * The fb should be usable even if interrupts are disabled (and they are 957 956 * during suspend/resume). Switch temporary to forced polling. 958 957 */ 959 958 printk(KERN_INFO "tmiofb: switching to polling\n"); 960 959 par->use_polling = true; 960 + #endif 961 961 tmiofb_hw_stop(dev); 962 962 963 963 if (cell->suspend)
+9 -8
drivers/video/via/viafbdev.c
··· 2036 2036 return count; 2037 2037 } 2038 2038 2039 - static void viafb_init_proc(struct proc_dir_entry *viafb_entry) 2039 + static void viafb_init_proc(struct proc_dir_entry **viafb_entry) 2040 2040 { 2041 2041 struct proc_dir_entry *entry; 2042 - viafb_entry = proc_mkdir("viafb", NULL); 2042 + *viafb_entry = proc_mkdir("viafb", NULL); 2043 2043 if (viafb_entry) { 2044 - entry = create_proc_entry("dvp0", 0, viafb_entry); 2044 + entry = create_proc_entry("dvp0", 0, *viafb_entry); 2045 2045 if (entry) { 2046 2046 entry->owner = THIS_MODULE; 2047 2047 entry->read_proc = viafb_dvp0_proc_read; 2048 2048 entry->write_proc = viafb_dvp0_proc_write; 2049 2049 } 2050 - entry = create_proc_entry("dvp1", 0, viafb_entry); 2050 + entry = create_proc_entry("dvp1", 0, *viafb_entry); 2051 2051 if (entry) { 2052 2052 entry->owner = THIS_MODULE; 2053 2053 entry->read_proc = viafb_dvp1_proc_read; 2054 2054 entry->write_proc = viafb_dvp1_proc_write; 2055 2055 } 2056 - entry = create_proc_entry("dfph", 0, viafb_entry); 2056 + entry = create_proc_entry("dfph", 0, *viafb_entry); 2057 2057 if (entry) { 2058 2058 entry->owner = THIS_MODULE; 2059 2059 entry->read_proc = viafb_dfph_proc_read; 2060 2060 entry->write_proc = viafb_dfph_proc_write; 2061 2061 } 2062 - entry = create_proc_entry("dfpl", 0, viafb_entry); 2062 + entry = create_proc_entry("dfpl", 0, *viafb_entry); 2063 2063 if (entry) { 2064 2064 entry->owner = THIS_MODULE; 2065 2065 entry->read_proc = viafb_dfpl_proc_read; ··· 2068 2068 if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info. 
2069 2069 lvds_chip_name || VT1636_LVDS == 2070 2070 viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) { 2071 - entry = create_proc_entry("vt1636", 0, viafb_entry); 2071 + entry = create_proc_entry("vt1636", 0, *viafb_entry); 2072 2072 if (entry) { 2073 2073 entry->owner = THIS_MODULE; 2074 2074 entry->read_proc = viafb_vt1636_proc_read; ··· 2087 2087 remove_proc_entry("dfpl", viafb_entry); 2088 2088 remove_proc_entry("vt1636", viafb_entry); 2089 2089 remove_proc_entry("vt1625", viafb_entry); 2090 + remove_proc_entry("viafb", NULL); 2090 2091 } 2091 2092 2092 2093 static int __devinit via_pci_probe(void) ··· 2349 2348 viafbinfo->node, viafbinfo->fix.id, default_var.xres, 2350 2349 default_var.yres, default_var.bits_per_pixel); 2351 2350 2352 - viafb_init_proc(viaparinfo->proc_entry); 2351 + viafb_init_proc(&viaparinfo->proc_entry); 2353 2352 viafb_init_dac(IGA2); 2354 2353 return 0; 2355 2354 }
+5 -5
drivers/w1/masters/omap_hdq.c
··· 86 86 static u8 omap_w1_read_byte(void *_hdq); 87 87 static void omap_w1_write_byte(void *_hdq, u8 byte); 88 88 static u8 omap_w1_reset_bus(void *_hdq); 89 - static void omap_w1_search_bus(void *_hdq, u8 search_type, 90 - w1_slave_found_callback slave_found); 89 + static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, 90 + u8 search_type, w1_slave_found_callback slave_found); 91 91 92 92 93 93 static struct w1_bus_master omap_w1_master = { ··· 231 231 } 232 232 233 233 /* W1 search callback function */ 234 - static void omap_w1_search_bus(void *_hdq, u8 search_type, 235 - w1_slave_found_callback slave_found) 234 + static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, 235 + u8 search_type, w1_slave_found_callback slave_found) 236 236 { 237 237 u64 module_id, rn_le, cs, id; 238 238 ··· 249 249 cs = w1_calc_crc8((u8 *)&rn_le, 7); 250 250 id = (cs << 56) | module_id; 251 251 252 - slave_found(_hdq, id); 252 + slave_found(master_dev, id); 253 253 } 254 254 255 255 static int _omap_hdq_reset(struct hdq_data *hdq_data)
+5 -1
fs/cifs/CHANGES
··· 8 8 sends, and also let tcp autotune the socket send and receive buffers. 9 9 This reduces the number of EAGAIN errors returned by TCP/IP in 10 10 high stress workloads (and the number of retries on socket writes 11 - when sending large SMBWriteX requests). 11 + when sending large SMBWriteX requests). Fix case in which a portion of 12 + data can in some cases not get written to the file on the server before the 13 + file is closed. Fix DFS parsing to properly handle path consumed field, 14 + and to handle certain codepage conversions better. Fix mount and 15 + umount race that can cause oops in mount or umount or reconnect. 12 16 13 17 Version 1.54 14 18 ------------
+149 -128
fs/cifs/cifs_debug.c
··· 107 107 #ifdef CONFIG_PROC_FS 108 108 static int cifs_debug_data_proc_show(struct seq_file *m, void *v) 109 109 { 110 - struct list_head *tmp; 111 - struct list_head *tmp1; 110 + struct list_head *tmp1, *tmp2, *tmp3; 112 111 struct mid_q_entry *mid_entry; 112 + struct TCP_Server_Info *server; 113 113 struct cifsSesInfo *ses; 114 114 struct cifsTconInfo *tcon; 115 - int i; 115 + int i, j; 116 + __u32 dev_type; 116 117 117 118 seq_puts(m, 118 119 "Display Internal CIFS Data Structures for Debugging\n" ··· 123 122 seq_printf(m, "Servers:"); 124 123 125 124 i = 0; 126 - read_lock(&GlobalSMBSeslock); 127 - list_for_each(tmp, &GlobalSMBSessionList) { 125 + read_lock(&cifs_tcp_ses_lock); 126 + list_for_each(tmp1, &cifs_tcp_ses_list) { 127 + server = list_entry(tmp1, struct TCP_Server_Info, 128 + tcp_ses_list); 128 129 i++; 129 - ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList); 130 - if ((ses->serverDomain == NULL) || (ses->serverOS == NULL) || 131 - (ses->serverNOS == NULL)) { 132 - seq_printf(m, "\nentry for %s not fully " 133 - "displayed\n\t", ses->serverName); 134 - } else { 135 - seq_printf(m, 136 - "\n%d) Name: %s Domain: %s Mounts: %d OS:" 137 - " %s \n\tNOS: %s\tCapability: 0x%x\n\tSMB" 130 + list_for_each(tmp2, &server->smb_ses_list) { 131 + ses = list_entry(tmp2, struct cifsSesInfo, 132 + smb_ses_list); 133 + if ((ses->serverDomain == NULL) || 134 + (ses->serverOS == NULL) || 135 + (ses->serverNOS == NULL)) { 136 + seq_printf(m, "\n%d) entry for %s not fully " 137 + "displayed\n\t", i, ses->serverName); 138 + } else { 139 + seq_printf(m, 140 + "\n%d) Name: %s Domain: %s Uses: %d OS:" 141 + " %s\n\tNOS: %s\tCapability: 0x%x\n\tSMB" 138 142 " session status: %d\t", 139 143 i, ses->serverName, ses->serverDomain, 140 - atomic_read(&ses->inUse), 141 - ses->serverOS, ses->serverNOS, 144 + ses->ses_count, ses->serverOS, ses->serverNOS, 142 145 ses->capabilities, ses->status); 143 - } 144 - if (ses->server) { 146 + } 145 147 seq_printf(m, "TCP status: 
%d\n\tLocal Users To " 146 - "Server: %d SecMode: 0x%x Req On Wire: %d", 147 - ses->server->tcpStatus, 148 - atomic_read(&ses->server->socketUseCount), 149 - ses->server->secMode, 150 - atomic_read(&ses->server->inFlight)); 148 + "Server: %d SecMode: 0x%x Req On Wire: %d", 149 + server->tcpStatus, server->srv_count, 150 + server->secMode, 151 + atomic_read(&server->inFlight)); 151 152 152 153 #ifdef CONFIG_CIFS_STATS2 153 154 seq_printf(m, " In Send: %d In MaxReq Wait: %d", 154 - atomic_read(&ses->server->inSend), 155 - atomic_read(&ses->server->num_waiters)); 155 + atomic_read(&server->inSend), 156 + atomic_read(&server->num_waiters)); 156 157 #endif 157 158 158 - seq_puts(m, "\nMIDs:\n"); 159 + seq_puts(m, "\n\tShares:"); 160 + j = 0; 161 + list_for_each(tmp3, &ses->tcon_list) { 162 + tcon = list_entry(tmp3, struct cifsTconInfo, 163 + tcon_list); 164 + ++j; 165 + dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); 166 + seq_printf(m, "\n\t%d) %s Mounts: %d ", j, 167 + tcon->treeName, tcon->tc_count); 168 + if (tcon->nativeFileSystem) { 169 + seq_printf(m, "Type: %s ", 170 + tcon->nativeFileSystem); 171 + } 172 + seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x" 173 + "\nPathComponentMax: %d Status: 0x%d", 174 + le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), 175 + le32_to_cpu(tcon->fsAttrInfo.Attributes), 176 + le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), 177 + tcon->tidStatus); 178 + if (dev_type == FILE_DEVICE_DISK) 179 + seq_puts(m, " type: DISK "); 180 + else if (dev_type == FILE_DEVICE_CD_ROM) 181 + seq_puts(m, " type: CDROM "); 182 + else 183 + seq_printf(m, " type: %d ", dev_type); 184 + 185 + if (tcon->need_reconnect) 186 + seq_puts(m, "\tDISCONNECTED "); 187 + seq_putc(m, '\n'); 188 + } 189 + 190 + seq_puts(m, "\n\tMIDs:\n"); 159 191 160 192 spin_lock(&GlobalMid_Lock); 161 - list_for_each(tmp1, &ses->server->pending_mid_q) { 162 - mid_entry = list_entry(tmp1, struct 163 - mid_q_entry, 193 + list_for_each(tmp3, &server->pending_mid_q) { 194 
+ mid_entry = list_entry(tmp3, struct mid_q_entry, 164 195 qhead); 165 - seq_printf(m, "State: %d com: %d pid:" 196 + seq_printf(m, "\tState: %d com: %d pid:" 166 197 " %d tsk: %p mid %d\n", 167 198 mid_entry->midState, 168 199 (int)mid_entry->command, ··· 204 171 } 205 172 spin_unlock(&GlobalMid_Lock); 206 173 } 207 - 208 174 } 209 - read_unlock(&GlobalSMBSeslock); 210 - seq_putc(m, '\n'); 211 - 212 - seq_puts(m, "Shares:"); 213 - 214 - i = 0; 215 - read_lock(&GlobalSMBSeslock); 216 - list_for_each(tmp, &GlobalTreeConnectionList) { 217 - __u32 dev_type; 218 - i++; 219 - tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 220 - dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); 221 - seq_printf(m, "\n%d) %s Uses: %d ", i, 222 - tcon->treeName, atomic_read(&tcon->useCount)); 223 - if (tcon->nativeFileSystem) { 224 - seq_printf(m, "Type: %s ", 225 - tcon->nativeFileSystem); 226 - } 227 - seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x" 228 - "\nPathComponentMax: %d Status: %d", 229 - le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), 230 - le32_to_cpu(tcon->fsAttrInfo.Attributes), 231 - le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), 232 - tcon->tidStatus); 233 - if (dev_type == FILE_DEVICE_DISK) 234 - seq_puts(m, " type: DISK "); 235 - else if (dev_type == FILE_DEVICE_CD_ROM) 236 - seq_puts(m, " type: CDROM "); 237 - else 238 - seq_printf(m, " type: %d ", dev_type); 239 - 240 - if (tcon->tidStatus == CifsNeedReconnect) 241 - seq_puts(m, "\tDISCONNECTED "); 242 - } 243 - read_unlock(&GlobalSMBSeslock); 244 - 175 + read_unlock(&cifs_tcp_ses_lock); 245 176 seq_putc(m, '\n'); 246 177 247 178 /* BB add code to dump additional info such as TCP session info now */ ··· 231 234 { 232 235 char c; 233 236 int rc; 234 - struct list_head *tmp; 237 + struct list_head *tmp1, *tmp2, *tmp3; 238 + struct TCP_Server_Info *server; 239 + struct cifsSesInfo *ses; 235 240 struct cifsTconInfo *tcon; 236 241 237 242 rc = get_user(c, buffer); ··· 241 242 return rc; 242 
243 243 244 if (c == '1' || c == 'y' || c == 'Y' || c == '0') { 244 - read_lock(&GlobalSMBSeslock); 245 245 #ifdef CONFIG_CIFS_STATS2 246 246 atomic_set(&totBufAllocCount, 0); 247 247 atomic_set(&totSmBufAllocCount, 0); 248 248 #endif /* CONFIG_CIFS_STATS2 */ 249 - list_for_each(tmp, &GlobalTreeConnectionList) { 250 - tcon = list_entry(tmp, struct cifsTconInfo, 251 - cifsConnectionList); 252 - atomic_set(&tcon->num_smbs_sent, 0); 253 - atomic_set(&tcon->num_writes, 0); 254 - atomic_set(&tcon->num_reads, 0); 255 - atomic_set(&tcon->num_oplock_brks, 0); 256 - atomic_set(&tcon->num_opens, 0); 257 - atomic_set(&tcon->num_closes, 0); 258 - atomic_set(&tcon->num_deletes, 0); 259 - atomic_set(&tcon->num_mkdirs, 0); 260 - atomic_set(&tcon->num_rmdirs, 0); 261 - atomic_set(&tcon->num_renames, 0); 262 - atomic_set(&tcon->num_t2renames, 0); 263 - atomic_set(&tcon->num_ffirst, 0); 264 - atomic_set(&tcon->num_fnext, 0); 265 - atomic_set(&tcon->num_fclose, 0); 266 - atomic_set(&tcon->num_hardlinks, 0); 267 - atomic_set(&tcon->num_symlinks, 0); 268 - atomic_set(&tcon->num_locks, 0); 249 + read_lock(&cifs_tcp_ses_lock); 250 + list_for_each(tmp1, &cifs_tcp_ses_list) { 251 + server = list_entry(tmp1, struct TCP_Server_Info, 252 + tcp_ses_list); 253 + list_for_each(tmp2, &server->smb_ses_list) { 254 + ses = list_entry(tmp2, struct cifsSesInfo, 255 + smb_ses_list); 256 + list_for_each(tmp3, &ses->tcon_list) { 257 + tcon = list_entry(tmp3, 258 + struct cifsTconInfo, 259 + tcon_list); 260 + atomic_set(&tcon->num_smbs_sent, 0); 261 + atomic_set(&tcon->num_writes, 0); 262 + atomic_set(&tcon->num_reads, 0); 263 + atomic_set(&tcon->num_oplock_brks, 0); 264 + atomic_set(&tcon->num_opens, 0); 265 + atomic_set(&tcon->num_closes, 0); 266 + atomic_set(&tcon->num_deletes, 0); 267 + atomic_set(&tcon->num_mkdirs, 0); 268 + atomic_set(&tcon->num_rmdirs, 0); 269 + atomic_set(&tcon->num_renames, 0); 270 + atomic_set(&tcon->num_t2renames, 0); 271 + atomic_set(&tcon->num_ffirst, 0); 272 + 
atomic_set(&tcon->num_fnext, 0); 273 + atomic_set(&tcon->num_fclose, 0); 274 + atomic_set(&tcon->num_hardlinks, 0); 275 + atomic_set(&tcon->num_symlinks, 0); 276 + atomic_set(&tcon->num_locks, 0); 277 + } 278 + } 269 279 } 270 - read_unlock(&GlobalSMBSeslock); 280 + read_unlock(&cifs_tcp_ses_lock); 271 281 } 272 282 273 283 return count; ··· 285 277 static int cifs_stats_proc_show(struct seq_file *m, void *v) 286 278 { 287 279 int i; 288 - struct list_head *tmp; 280 + struct list_head *tmp1, *tmp2, *tmp3; 281 + struct TCP_Server_Info *server; 282 + struct cifsSesInfo *ses; 289 283 struct cifsTconInfo *tcon; 290 284 291 285 seq_printf(m, ··· 316 306 GlobalCurrentXid, GlobalMaxActiveXid); 317 307 318 308 i = 0; 319 - read_lock(&GlobalSMBSeslock); 320 - list_for_each(tmp, &GlobalTreeConnectionList) { 321 - i++; 322 - tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 323 - seq_printf(m, "\n%d) %s", i, tcon->treeName); 324 - if (tcon->tidStatus == CifsNeedReconnect) 325 - seq_puts(m, "\tDISCONNECTED "); 326 - seq_printf(m, "\nSMBs: %d Oplock Breaks: %d", 327 - atomic_read(&tcon->num_smbs_sent), 328 - atomic_read(&tcon->num_oplock_brks)); 329 - seq_printf(m, "\nReads: %d Bytes: %lld", 330 - atomic_read(&tcon->num_reads), 331 - (long long)(tcon->bytes_read)); 332 - seq_printf(m, "\nWrites: %d Bytes: %lld", 333 - atomic_read(&tcon->num_writes), 334 - (long long)(tcon->bytes_written)); 335 - seq_printf(m, 336 - "\nLocks: %d HardLinks: %d Symlinks: %d", 337 - atomic_read(&tcon->num_locks), 338 - atomic_read(&tcon->num_hardlinks), 339 - atomic_read(&tcon->num_symlinks)); 340 - 341 - seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d", 342 - atomic_read(&tcon->num_opens), 343 - atomic_read(&tcon->num_closes), 344 - atomic_read(&tcon->num_deletes)); 345 - seq_printf(m, "\nMkdirs: %d Rmdirs: %d", 346 - atomic_read(&tcon->num_mkdirs), 347 - atomic_read(&tcon->num_rmdirs)); 348 - seq_printf(m, "\nRenames: %d T2 Renames %d", 349 - atomic_read(&tcon->num_renames), 350 
- atomic_read(&tcon->num_t2renames)); 351 - seq_printf(m, "\nFindFirst: %d FNext %d FClose %d", 352 - atomic_read(&tcon->num_ffirst), 353 - atomic_read(&tcon->num_fnext), 354 - atomic_read(&tcon->num_fclose)); 309 + read_lock(&cifs_tcp_ses_lock); 310 + list_for_each(tmp1, &cifs_tcp_ses_list) { 311 + server = list_entry(tmp1, struct TCP_Server_Info, 312 + tcp_ses_list); 313 + list_for_each(tmp2, &server->smb_ses_list) { 314 + ses = list_entry(tmp2, struct cifsSesInfo, 315 + smb_ses_list); 316 + list_for_each(tmp3, &ses->tcon_list) { 317 + tcon = list_entry(tmp3, 318 + struct cifsTconInfo, 319 + tcon_list); 320 + i++; 321 + seq_printf(m, "\n%d) %s", i, tcon->treeName); 322 + if (tcon->need_reconnect) 323 + seq_puts(m, "\tDISCONNECTED "); 324 + seq_printf(m, "\nSMBs: %d Oplock Breaks: %d", 325 + atomic_read(&tcon->num_smbs_sent), 326 + atomic_read(&tcon->num_oplock_brks)); 327 + seq_printf(m, "\nReads: %d Bytes: %lld", 328 + atomic_read(&tcon->num_reads), 329 + (long long)(tcon->bytes_read)); 330 + seq_printf(m, "\nWrites: %d Bytes: %lld", 331 + atomic_read(&tcon->num_writes), 332 + (long long)(tcon->bytes_written)); 333 + seq_printf(m, "\nLocks: %d HardLinks: %d " 334 + "Symlinks: %d", 335 + atomic_read(&tcon->num_locks), 336 + atomic_read(&tcon->num_hardlinks), 337 + atomic_read(&tcon->num_symlinks)); 338 + seq_printf(m, "\nOpens: %d Closes: %d" 339 + "Deletes: %d", 340 + atomic_read(&tcon->num_opens), 341 + atomic_read(&tcon->num_closes), 342 + atomic_read(&tcon->num_deletes)); 343 + seq_printf(m, "\nMkdirs: %d Rmdirs: %d", 344 + atomic_read(&tcon->num_mkdirs), 345 + atomic_read(&tcon->num_rmdirs)); 346 + seq_printf(m, "\nRenames: %d T2 Renames %d", 347 + atomic_read(&tcon->num_renames), 348 + atomic_read(&tcon->num_t2renames)); 349 + seq_printf(m, "\nFindFirst: %d FNext %d " 350 + "FClose %d", 351 + atomic_read(&tcon->num_ffirst), 352 + atomic_read(&tcon->num_fnext), 353 + atomic_read(&tcon->num_fclose)); 354 + } 355 + } 355 356 } 356 - 
read_unlock(&GlobalSMBSeslock); 357 + read_unlock(&cifs_tcp_ses_lock); 357 358 358 359 seq_putc(m, '\n'); 359 360 return 0;
+47 -24
fs/cifs/cifs_dfs_ref.c
··· 106 106 /** 107 107 * compose_mount_options - creates mount options for refferral 108 108 * @sb_mountdata: parent/root DFS mount options (template) 109 - * @ref_unc: refferral server UNC 109 + * @dentry: point where we are going to mount 110 + * @ref: server's referral 110 111 * @devname: pointer for saving device name 111 112 * 112 113 * creates mount options for submount based on template options sb_mountdata ··· 117 116 * Caller is responcible for freeing retunrned value if it is not error. 118 117 */ 119 118 static char *compose_mount_options(const char *sb_mountdata, 120 - const char *ref_unc, 119 + struct dentry *dentry, 120 + const struct dfs_info3_param *ref, 121 121 char **devname) 122 122 { 123 123 int rc; ··· 128 126 char *srvIP = NULL; 129 127 char sep = ','; 130 128 int off, noff; 129 + char *fullpath; 131 130 132 131 if (sb_mountdata == NULL) 133 132 return ERR_PTR(-EINVAL); 134 133 135 - *devname = cifs_get_share_name(ref_unc); 134 + *devname = cifs_get_share_name(ref->node_name); 136 135 rc = dns_resolve_server_name_to_ip(*devname, &srvIP); 137 136 if (rc != 0) { 138 137 cERROR(1, ("%s: Failed to resolve server part of %s to IP", ··· 141 138 mountdata = ERR_PTR(rc); 142 139 goto compose_mount_options_out; 143 140 } 144 - md_len = strlen(sb_mountdata) + strlen(srvIP) + strlen(ref_unc) + 3; 141 + /* md_len = strlen(...) 
+ 12 for 'sep+prefixpath=' 142 + * assuming that we have 'unc=' and 'ip=' in 143 + * the original sb_mountdata 144 + */ 145 + md_len = strlen(sb_mountdata) + strlen(srvIP) + 146 + strlen(ref->node_name) + 12; 145 147 mountdata = kzalloc(md_len+1, GFP_KERNEL); 146 148 if (mountdata == NULL) { 147 149 mountdata = ERR_PTR(-ENOMEM); ··· 160 152 strncpy(mountdata, sb_mountdata, 5); 161 153 off += 5; 162 154 } 163 - while ((tkn_e = strchr(sb_mountdata+off, sep))) { 164 - noff = (tkn_e - (sb_mountdata+off)) + 1; 165 - if (strnicmp(sb_mountdata+off, "unc=", 4) == 0) { 155 + 156 + do { 157 + tkn_e = strchr(sb_mountdata + off, sep); 158 + if (tkn_e == NULL) 159 + noff = strlen(sb_mountdata + off); 160 + else 161 + noff = tkn_e - (sb_mountdata + off) + 1; 162 + 163 + if (strnicmp(sb_mountdata + off, "unc=", 4) == 0) { 166 164 off += noff; 167 165 continue; 168 166 } 169 - if (strnicmp(sb_mountdata+off, "ip=", 3) == 0) { 167 + if (strnicmp(sb_mountdata + off, "ip=", 3) == 0) { 170 168 off += noff; 171 169 continue; 172 170 } 173 - if (strnicmp(sb_mountdata+off, "prefixpath=", 3) == 0) { 171 + if (strnicmp(sb_mountdata + off, "prefixpath=", 11) == 0) { 174 172 off += noff; 175 173 continue; 176 174 } 177 - strncat(mountdata, sb_mountdata+off, noff); 175 + strncat(mountdata, sb_mountdata + off, noff); 178 176 off += noff; 179 - } 180 - strcat(mountdata, sb_mountdata+off); 177 + } while (tkn_e); 178 + strcat(mountdata, sb_mountdata + off); 181 179 mountdata[md_len] = '\0'; 182 180 183 181 /* copy new IP and ref share name */ 184 - strcat(mountdata, ",ip="); 182 + if (mountdata[strlen(mountdata) - 1] != sep) 183 + strncat(mountdata, &sep, 1); 184 + strcat(mountdata, "ip="); 185 185 strcat(mountdata, srvIP); 186 - strcat(mountdata, ",unc="); 186 + strncat(mountdata, &sep, 1); 187 + strcat(mountdata, "unc="); 187 188 strcat(mountdata, *devname); 188 189 189 190 /* find & copy prefixpath */ 190 - tkn_e = strchr(ref_unc+2, '\\'); 191 - if (tkn_e) { 192 - tkn_e = strchr(tkn_e+1, '\\'); 
193 - if (tkn_e) { 194 - strcat(mountdata, ",prefixpath="); 195 - strcat(mountdata, tkn_e+1); 196 - } 191 + tkn_e = strchr(ref->node_name + 2, '\\'); 192 + if (tkn_e == NULL) /* invalid unc, missing share name*/ 193 + goto compose_mount_options_out; 194 + 195 + fullpath = build_path_from_dentry(dentry); 196 + tkn_e = strchr(tkn_e + 1, '\\'); 197 + if (tkn_e || strlen(fullpath) - (ref->path_consumed)) { 198 + strncat(mountdata, &sep, 1); 199 + strcat(mountdata, "prefixpath="); 200 + if (tkn_e) 201 + strcat(mountdata, tkn_e + 1); 202 + strcat(mountdata, fullpath + (ref->path_consumed)); 197 203 } 204 + kfree(fullpath); 198 205 199 206 /*cFYI(1,("%s: parent mountdata: %s", __func__,sb_mountdata));*/ 200 207 /*cFYI(1, ("%s: submount mountdata: %s", __func__, mountdata ));*/ ··· 221 198 222 199 223 200 static struct vfsmount *cifs_dfs_do_refmount(const struct vfsmount *mnt_parent, 224 - struct dentry *dentry, char *ref_unc) 201 + struct dentry *dentry, const struct dfs_info3_param *ref) 225 202 { 226 203 struct cifs_sb_info *cifs_sb; 227 204 struct vfsmount *mnt; ··· 230 207 231 208 cifs_sb = CIFS_SB(dentry->d_inode->i_sb); 232 209 mountdata = compose_mount_options(cifs_sb->mountdata, 233 - ref_unc, &devname); 210 + dentry, ref, &devname); 234 211 235 212 if (IS_ERR(mountdata)) 236 213 return (struct vfsmount *)mountdata; ··· 333 310 } 334 311 mnt = cifs_dfs_do_refmount(nd->path.mnt, 335 312 nd->path.dentry, 336 - referrals[i].node_name); 313 + referrals + i); 337 314 cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p", 338 315 __func__, 339 316 referrals[i].node_name, mnt));
+2 -2
fs/cifs/cifs_spnego.c
··· 73 73 * strlen(";sec=ntlmsspi") */ 74 74 #define MAX_MECH_STR_LEN 13 75 75 76 - /* max possible addr len eg FEDC:BA98:7654:3210:FEDC:BA98:7654:3210/60 */ 77 - #define MAX_IPV6_ADDR_LEN 42 76 + /* max possible addr len eg FEDC:BA98:7654:3210:FEDC:BA98:7654:3210/128 */ 77 + #define MAX_IPV6_ADDR_LEN 43 78 78 79 79 /* strlen of "host=" */ 80 80 #define HOST_KEY_LEN 5
+15 -15
fs/cifs/cifsfs.c
··· 514 514 tcon = cifs_sb->tcon; 515 515 if (tcon == NULL) 516 516 return; 517 - down(&tcon->tconSem); 518 - if (atomic_read(&tcon->useCount) == 1) 517 + 518 + read_lock(&cifs_tcp_ses_lock); 519 + if (tcon->tc_count == 1) 519 520 tcon->tidStatus = CifsExiting; 520 - up(&tcon->tconSem); 521 + read_unlock(&cifs_tcp_ses_lock); 521 522 522 523 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ 523 524 /* cancel_notify_requests(tcon); */ ··· 1014 1013 not bother sending an oplock release if session 1015 1014 to server still is disconnected since oplock 1016 1015 already released by the server in that case */ 1017 - if (pTcon->tidStatus != CifsNeedReconnect) { 1016 + if (!pTcon->need_reconnect) { 1018 1017 rc = CIFSSMBLock(0, pTcon, netfid, 1019 1018 0 /* len */ , 0 /* offset */, 0, 1020 1019 0, LOCKING_ANDX_OPLOCK_RELEASE, ··· 1032 1031 static int cifs_dnotify_thread(void *dummyarg) 1033 1032 { 1034 1033 struct list_head *tmp; 1035 - struct cifsSesInfo *ses; 1034 + struct TCP_Server_Info *server; 1036 1035 1037 1036 do { 1038 1037 if (try_to_freeze()) 1039 1038 continue; 1040 1039 set_current_state(TASK_INTERRUPTIBLE); 1041 1040 schedule_timeout(15*HZ); 1042 - read_lock(&GlobalSMBSeslock); 1043 1041 /* check if any stuck requests that need 1044 1042 to be woken up and wakeq so the 1045 1043 thread can wake up and error out */ 1046 - list_for_each(tmp, &GlobalSMBSessionList) { 1047 - ses = list_entry(tmp, struct cifsSesInfo, 1048 - cifsSessionList); 1049 - if (ses->server && atomic_read(&ses->server->inFlight)) 1050 - wake_up_all(&ses->server->response_q); 1044 + read_lock(&cifs_tcp_ses_lock); 1045 + list_for_each(tmp, &cifs_tcp_ses_list) { 1046 + server = list_entry(tmp, struct TCP_Server_Info, 1047 + tcp_ses_list); 1048 + if (atomic_read(&server->inFlight)) 1049 + wake_up_all(&server->response_q); 1051 1050 } 1052 - read_unlock(&GlobalSMBSeslock); 1051 + read_unlock(&cifs_tcp_ses_lock); 1053 1052 } while (!kthread_should_stop()); 1054 1053 1055 
1054 return 0; ··· 1060 1059 { 1061 1060 int rc = 0; 1062 1061 cifs_proc_init(); 1063 - /* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */ 1064 - INIT_LIST_HEAD(&GlobalSMBSessionList); 1065 - INIT_LIST_HEAD(&GlobalTreeConnectionList); 1062 + INIT_LIST_HEAD(&cifs_tcp_ses_list); 1066 1063 INIT_LIST_HEAD(&GlobalOplock_Q); 1067 1064 #ifdef CONFIG_CIFS_EXPERIMENTAL 1068 1065 INIT_LIST_HEAD(&GlobalDnotifyReqList); ··· 1088 1089 GlobalMaxActiveXid = 0; 1089 1090 memset(Local_System_Name, 0, 15); 1090 1091 rwlock_init(&GlobalSMBSeslock); 1092 + rwlock_init(&cifs_tcp_ses_lock); 1091 1093 spin_lock_init(&GlobalMid_Lock); 1092 1094 1093 1095 if (cifs_max_pending < 2) {
+30 -19
fs/cifs/cifsglob.h
··· 85 85 }; 86 86 87 87 enum protocolEnum { 88 - IPV4 = 0, 89 - IPV6, 88 + TCP = 0, 90 89 SCTP 91 90 /* Netbios frames protocol not supported at this time */ 92 91 }; ··· 121 122 */ 122 123 123 124 struct TCP_Server_Info { 125 + struct list_head tcp_ses_list; 126 + struct list_head smb_ses_list; 127 + int srv_count; /* reference counter */ 124 128 /* 15 character server name + 0x20 16th byte indicating type = srv */ 125 129 char server_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; 126 130 char unicode_server_Name[SERVER_NAME_LEN_WITH_NULL * 2]; ··· 145 143 bool svlocal:1; /* local server or remote */ 146 144 bool noblocksnd; /* use blocking sendmsg */ 147 145 bool noautotune; /* do not autotune send buf sizes */ 148 - atomic_t socketUseCount; /* number of open cifs sessions on socket */ 149 146 atomic_t inFlight; /* number of requests on the wire to server */ 150 147 #ifdef CONFIG_CIFS_STATS2 151 148 atomic_t inSend; /* requests trying to send */ ··· 195 194 * Session structure. One of these for each uid session with a particular host 196 195 */ 197 196 struct cifsSesInfo { 198 - struct list_head cifsSessionList; 197 + struct list_head smb_ses_list; 198 + struct list_head tcon_list; 199 199 struct semaphore sesSem; 200 200 #if 0 201 201 struct cifsUidInfo *uidInfo; /* pointer to user info */ 202 202 #endif 203 203 struct TCP_Server_Info *server; /* pointer to server info */ 204 - atomic_t inUse; /* # of mounts (tree connections) on this ses */ 204 + int ses_count; /* reference counter */ 205 205 enum statusEnum status; 206 206 unsigned overrideSecFlg; /* if non-zero override global sec flags */ 207 207 __u16 ipc_tid; /* special tid for connection to IPC share */ ··· 218 216 char userName[MAX_USERNAME_SIZE + 1]; 219 217 char *domainName; 220 218 char *password; 219 + bool need_reconnect:1; /* connection reset, uid now invalid */ 221 220 }; 222 221 /* no more than one of the following three session flags may be set */ 223 222 #define CIFS_SES_NT4 1 ··· 233 230 * session 
234 231 */ 235 232 struct cifsTconInfo { 236 - struct list_head cifsConnectionList; 233 + struct list_head tcon_list; 234 + int tc_count; 237 235 struct list_head openFileList; 238 - struct semaphore tconSem; 239 236 struct cifsSesInfo *ses; /* pointer to session associated with */ 240 237 char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ 241 238 char *nativeFileSystem; 242 239 __u16 tid; /* The 2 byte tree id */ 243 240 __u16 Flags; /* optional support bits */ 244 241 enum statusEnum tidStatus; 245 - atomic_t useCount; /* how many explicit/implicit mounts to share */ 246 242 #ifdef CONFIG_CIFS_STATS 247 243 atomic_t num_smbs_sent; 248 244 atomic_t num_writes; ··· 290 288 bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol 291 289 for this mount even if server would support */ 292 290 bool local_lease:1; /* check leases (only) on local system not remote */ 291 + bool need_reconnect:1; /* connection reset, tid now invalid */ 293 292 /* BB add field for back pointer to sb struct(s)? */ 294 293 }; 295 294 ··· 591 588 #endif 592 589 593 590 /* 594 - * The list of servers that did not respond with NT LM 0.12. 595 - * This list helps improve performance and eliminate the messages indicating 596 - * that we had a communications error talking to the server in this list. 591 + * the list of TCP_Server_Info structures, ie each of the sockets 592 + * connecting our client to a distinct server (ip address), is 593 + * chained together by cifs_tcp_ses_list. The list of all our SMB 594 + * sessions (and from that the tree connections) can be found 595 + * by iterating over cifs_tcp_ses_list 597 596 */ 598 - /* Feature not supported */ 599 - /* GLOBAL_EXTERN struct servers_not_supported *NotSuppList; */ 597 + GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; 600 598 601 599 /* 602 - * The following is a hash table of all the users we know about. 
600 + * This lock protects the cifs_tcp_ses_list, the list of smb sessions per 601 + * tcp session, and the list of tcon's per smb session. It also protects 602 + * the reference counters for the server, smb session, and tcon. Finally, 603 + * changes to the tcon->tidStatus should be done while holding this lock. 603 604 */ 604 - GLOBAL_EXTERN struct smbUidInfo *GlobalUidList[UID_HASH]; 605 + GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock; 605 606 606 - /* GLOBAL_EXTERN struct list_head GlobalServerList; BB not implemented yet */ 607 - GLOBAL_EXTERN struct list_head GlobalSMBSessionList; 608 - GLOBAL_EXTERN struct list_head GlobalTreeConnectionList; 609 - GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */ 607 + /* 608 + * This lock protects the cifs_file->llist and cifs_file->flist 609 + * list operations, and updates to some flags (cifs_file->invalidHandle) 610 + * It will be moved to either use the tcon->stat_lock or equivalent later. 611 + * If cifs_tcp_ses_lock and the lock below are both needed to be held, then 612 + * the cifs_tcp_ses_lock must be grabbed first and released last. 613 + */ 614 + GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; 610 615 611 616 GLOBAL_EXTERN struct list_head GlobalOplock_Q; 612 617
+72 -62
fs/cifs/cifssmb.c
··· 190 190 /* need to prevent multiple threads trying to 191 191 simultaneously reconnect the same SMB session */ 192 192 down(&tcon->ses->sesSem); 193 - if (tcon->ses->status == CifsNeedReconnect) 193 + if (tcon->ses->need_reconnect) 194 194 rc = cifs_setup_session(0, tcon->ses, 195 195 nls_codepage); 196 - if (!rc && (tcon->tidStatus == CifsNeedReconnect)) { 196 + if (!rc && (tcon->need_reconnect)) { 197 197 mark_open_files_invalid(tcon); 198 198 rc = CIFSTCon(0, tcon->ses, tcon->treeName, 199 199 tcon, nls_codepage); ··· 337 337 /* need to prevent multiple threads trying to 338 338 simultaneously reconnect the same SMB session */ 339 339 down(&tcon->ses->sesSem); 340 - if (tcon->ses->status == CifsNeedReconnect) 340 + if (tcon->ses->need_reconnect) 341 341 rc = cifs_setup_session(0, tcon->ses, 342 342 nls_codepage); 343 - if (!rc && (tcon->tidStatus == CifsNeedReconnect)) { 343 + if (!rc && (tcon->need_reconnect)) { 344 344 mark_open_files_invalid(tcon); 345 345 rc = CIFSTCon(0, tcon->ses, tcon->treeName, 346 346 tcon, nls_codepage); ··· 664 664 rc = -EIO; 665 665 goto neg_err_exit; 666 666 } 667 - 668 - if (server->socketUseCount.counter > 1) { 667 + read_lock(&cifs_tcp_ses_lock); 668 + if (server->srv_count > 1) { 669 + read_unlock(&cifs_tcp_ses_lock); 669 670 if (memcmp(server->server_GUID, 670 671 pSMBr->u.extended_response. 671 672 GUID, 16) != 0) { ··· 675 674 pSMBr->u.extended_response.GUID, 676 675 16); 677 676 } 678 - } else 677 + } else { 678 + read_unlock(&cifs_tcp_ses_lock); 679 679 memcpy(server->server_GUID, 680 680 pSMBr->u.extended_response.GUID, 16); 681 + } 681 682 682 683 if (count == 16) { 683 684 server->secType = RawNTLMSSP; ··· 742 739 int rc = 0; 743 740 744 741 cFYI(1, ("In tree disconnect")); 742 + 743 + /* BB: do we need to check this? These should never be NULL. 
*/ 744 + if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) 745 + return -EIO; 746 + 745 747 /* 746 - * If last user of the connection and 747 - * connection alive - disconnect it 748 - * If this is the last connection on the server session disconnect it 749 - * (and inside session disconnect we should check if tcp socket needs 750 - * to be freed and kernel thread woken up). 748 + * No need to return error on this operation if tid invalidated and 749 + * closed on server already e.g. due to tcp session crashing. Also, 750 + * the tcon is no longer on the list, so no need to take lock before 751 + * checking this. 751 752 */ 752 - if (tcon) 753 - down(&tcon->tconSem); 754 - else 755 - return -EIO; 756 - 757 - atomic_dec(&tcon->useCount); 758 - if (atomic_read(&tcon->useCount) > 0) { 759 - up(&tcon->tconSem); 760 - return -EBUSY; 761 - } 762 - 763 - /* No need to return error on this operation if tid invalidated and 764 - closed on server already e.g. due to tcp session crashing */ 765 - if (tcon->tidStatus == CifsNeedReconnect) { 766 - up(&tcon->tconSem); 753 + if (tcon->need_reconnect) 767 754 return 0; 768 - } 769 755 770 - if ((tcon->ses == NULL) || (tcon->ses->server == NULL)) { 771 - up(&tcon->tconSem); 772 - return -EIO; 773 - } 774 756 rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, 775 757 (void **)&smb_buffer); 776 - if (rc) { 777 - up(&tcon->tconSem); 758 + if (rc) 778 759 return rc; 779 - } 780 760 781 761 rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0); 782 762 if (rc) 783 763 cFYI(1, ("Tree disconnect failed %d", rc)); 784 764 785 - up(&tcon->tconSem); 786 - 787 765 /* No need to return error on this operation if tid invalidated and 788 - closed on server already e.g. due to tcp session crashing */ 766 + closed on server already e.g. 
due to tcp session crashing */ 789 767 if (rc == -EAGAIN) 790 768 rc = 0; 791 769 ··· 780 796 int rc = 0; 781 797 782 798 cFYI(1, ("In SMBLogoff for session disconnect")); 783 - if (ses) 784 - down(&ses->sesSem); 785 - else 799 + 800 + /* 801 + * BB: do we need to check validity of ses and server? They should 802 + * always be valid since we have an active reference. If not, that 803 + * should probably be a BUG() 804 + */ 805 + if (!ses || !ses->server) 786 806 return -EIO; 787 807 788 - atomic_dec(&ses->inUse); 789 - if (atomic_read(&ses->inUse) > 0) { 790 - up(&ses->sesSem); 791 - return -EBUSY; 792 - } 808 + down(&ses->sesSem); 809 + if (ses->need_reconnect) 810 + goto session_already_dead; /* no need to send SMBlogoff if uid 811 + already closed due to reconnect */ 793 812 rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB); 794 813 if (rc) { 795 814 up(&ses->sesSem); 796 815 return rc; 797 816 } 798 817 799 - if (ses->server) { 800 - pSMB->hdr.Mid = GetNextMid(ses->server); 818 + pSMB->hdr.Mid = GetNextMid(ses->server); 801 819 802 - if (ses->server->secMode & 820 + if (ses->server->secMode & 803 821 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 804 822 pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 805 - } 806 823 807 824 pSMB->hdr.Uid = ses->Suid; 808 825 809 826 pSMB->AndXCommand = 0xFF; 810 827 rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); 811 - if (ses->server) { 812 - atomic_dec(&ses->server->socketUseCount); 813 - if (atomic_read(&ses->server->socketUseCount) == 0) { 814 - spin_lock(&GlobalMid_Lock); 815 - ses->server->tcpStatus = CifsExiting; 816 - spin_unlock(&GlobalMid_Lock); 817 - rc = -ESHUTDOWN; 818 - } 819 - } 828 + session_already_dead: 820 829 up(&ses->sesSem); 821 830 822 831 /* if session dead then we do not need to do ulogoff, ··· 3899 3922 return rc; 3900 3923 } 3901 3924 3925 + /* computes length of UCS string converted to host codepage 3926 + * @src: UCS string 3927 + * @maxlen: length of the input string 
in UCS characters 3928 + * (not in bytes) 3929 + * 3930 + * return: size of input string in host codepage 3931 + */ 3932 + static int hostlen_fromUCS(const __le16 *src, const int maxlen, 3933 + const struct nls_table *nls_codepage) { 3934 + int i; 3935 + int hostlen = 0; 3936 + char to[4]; 3937 + int charlen; 3938 + for (i = 0; (i < maxlen) && src[i]; ++i) { 3939 + charlen = nls_codepage->uni2char(le16_to_cpu(src[i]), 3940 + to, NLS_MAX_CHARSET_SIZE); 3941 + hostlen += charlen > 0 ? charlen : 1; 3942 + } 3943 + return hostlen; 3944 + } 3945 + 3902 3946 /* parses DFS refferal V3 structure 3903 3947 * caller is responsible for freeing target_nodes 3904 3948 * returns: ··· 3930 3932 parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, 3931 3933 unsigned int *num_of_nodes, 3932 3934 struct dfs_info3_param **target_nodes, 3933 - const struct nls_table *nls_codepage) 3935 + const struct nls_table *nls_codepage, int remap, 3936 + const char *searchName) 3934 3937 { 3935 3938 int i, rc = 0; 3936 3939 char *data_end; ··· 3982 3983 struct dfs_info3_param *node = (*target_nodes)+i; 3983 3984 3984 3985 node->flags = le16_to_cpu(pSMBr->DFSFlags); 3985 - node->path_consumed = le16_to_cpu(pSMBr->PathConsumed); 3986 + if (is_unicode) { 3987 + __le16 *tmp = kmalloc(strlen(searchName)*2, GFP_KERNEL); 3988 + cifsConvertToUCS((__le16 *) tmp, searchName, 3989 + PATH_MAX, nls_codepage, remap); 3990 + node->path_consumed = hostlen_fromUCS(tmp, 3991 + le16_to_cpu(pSMBr->PathConsumed)/2, 3992 + nls_codepage); 3993 + kfree(tmp); 3994 + } else 3995 + node->path_consumed = le16_to_cpu(pSMBr->PathConsumed); 3996 + 3986 3997 node->server_type = le16_to_cpu(ref->ServerType); 3987 3998 node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags); 3988 3999 ··· 4125 4116 4126 4117 /* parse returned result into more usable form */ 4127 4118 rc = parse_DFS_referrals(pSMBr, num_of_nodes, 4128 - target_nodes, nls_codepage); 4119 + target_nodes, nls_codepage, remap, 4120 + searchName); 4129 4121 4130 
4122 GetDFSRefExit: 4131 4123 cifs_buf_release(pSMB);
+413 -416
fs/cifs/connect.c
··· 124 124 cifs_reconnect(struct TCP_Server_Info *server) 125 125 { 126 126 int rc = 0; 127 - struct list_head *tmp; 127 + struct list_head *tmp, *tmp2; 128 128 struct cifsSesInfo *ses; 129 129 struct cifsTconInfo *tcon; 130 130 struct mid_q_entry *mid_entry; ··· 144 144 145 145 /* before reconnecting the tcp session, mark the smb session (uid) 146 146 and the tid bad so they are not used until reconnected */ 147 - read_lock(&GlobalSMBSeslock); 148 - list_for_each(tmp, &GlobalSMBSessionList) { 149 - ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList); 150 - if (ses->server) { 151 - if (ses->server == server) { 152 - ses->status = CifsNeedReconnect; 153 - ses->ipc_tid = 0; 154 - } 147 + read_lock(&cifs_tcp_ses_lock); 148 + list_for_each(tmp, &server->smb_ses_list) { 149 + ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 150 + ses->need_reconnect = true; 151 + ses->ipc_tid = 0; 152 + list_for_each(tmp2, &ses->tcon_list) { 153 + tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list); 154 + tcon->need_reconnect = true; 155 155 } 156 - /* else tcp and smb sessions need reconnection */ 157 156 } 158 - list_for_each(tmp, &GlobalTreeConnectionList) { 159 - tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 160 - if ((tcon->ses) && (tcon->ses->server == server)) 161 - tcon->tidStatus = CifsNeedReconnect; 162 - } 163 - read_unlock(&GlobalSMBSeslock); 157 + read_unlock(&cifs_tcp_ses_lock); 164 158 /* do not want to be sending data on a socket we are freeing */ 165 159 down(&server->tcpSem); 166 160 if (server->ssocket) { ··· 187 193 while ((server->tcpStatus != CifsExiting) && 188 194 (server->tcpStatus != CifsGood)) { 189 195 try_to_freeze(); 190 - if (server->protocolType == IPV6) { 196 + if (server->addr.sockAddr6.sin6_family == AF_INET6) { 191 197 rc = ipv6_connect(&server->addr.sockAddr6, 192 198 &server->ssocket, server->noautotune); 193 199 } else { ··· 411 417 msleep(1); /* minimum sleep to prevent looping 412 418 allowing socket to 
clear and app threads to set 413 419 tcpStatus CifsNeedReconnect if server hung */ 414 - if (pdu_length < 4) 420 + if (pdu_length < 4) { 421 + iov.iov_base = (4 - pdu_length) + 422 + (char *)smb_buffer; 423 + iov.iov_len = pdu_length; 424 + smb_msg.msg_control = NULL; 425 + smb_msg.msg_controllen = 0; 415 426 goto incomplete_rcv; 416 - else 427 + } else 417 428 continue; 418 429 } else if (length <= 0) { 419 430 if (server->tcpStatus == CifsNew) { ··· 653 654 } 654 655 } /* end while !EXITING */ 655 656 657 + /* take it off the list, if it's not already */ 658 + write_lock(&cifs_tcp_ses_lock); 659 + list_del_init(&server->tcp_ses_list); 660 + write_unlock(&cifs_tcp_ses_lock); 661 + 656 662 spin_lock(&GlobalMid_Lock); 657 663 server->tcpStatus = CifsExiting; 658 664 spin_unlock(&GlobalMid_Lock); ··· 690 686 if (smallbuf) /* no sense logging a debug message if NULL */ 691 687 cifs_small_buf_release(smallbuf); 692 688 693 - read_lock(&GlobalSMBSeslock); 689 + /* 690 + * BB: we shouldn't have to do any of this. 
It shouldn't be 691 + * possible to exit from the thread with active SMB sessions 692 + */ 693 + read_lock(&cifs_tcp_ses_lock); 694 694 if (list_empty(&server->pending_mid_q)) { 695 695 /* loop through server session structures attached to this and 696 696 mark them dead */ 697 - list_for_each(tmp, &GlobalSMBSessionList) { 698 - ses = 699 - list_entry(tmp, struct cifsSesInfo, 700 - cifsSessionList); 701 - if (ses->server == server) { 702 - ses->status = CifsExiting; 703 - ses->server = NULL; 704 - } 697 + list_for_each(tmp, &server->smb_ses_list) { 698 + ses = list_entry(tmp, struct cifsSesInfo, 699 + smb_ses_list); 700 + ses->status = CifsExiting; 701 + ses->server = NULL; 705 702 } 706 - read_unlock(&GlobalSMBSeslock); 703 + read_unlock(&cifs_tcp_ses_lock); 707 704 } else { 708 705 /* although we can not zero the server struct pointer yet, 709 706 since there are active requests which may depnd on them, 710 707 mark the corresponding SMB sessions as exiting too */ 711 - list_for_each(tmp, &GlobalSMBSessionList) { 708 + list_for_each(tmp, &server->smb_ses_list) { 712 709 ses = list_entry(tmp, struct cifsSesInfo, 713 - cifsSessionList); 714 - if (ses->server == server) 715 - ses->status = CifsExiting; 710 + smb_ses_list); 711 + ses->status = CifsExiting; 716 712 } 717 713 718 714 spin_lock(&GlobalMid_Lock); ··· 727 723 } 728 724 } 729 725 spin_unlock(&GlobalMid_Lock); 730 - read_unlock(&GlobalSMBSeslock); 726 + read_unlock(&cifs_tcp_ses_lock); 731 727 /* 1/8th of sec is more than enough time for them to exit */ 732 728 msleep(125); 733 729 } ··· 749 745 if there are any pointing to this (e.g 750 746 if a crazy root user tried to kill cifsd 751 747 kernel thread explicitly this might happen) */ 752 - write_lock(&GlobalSMBSeslock); 753 - list_for_each(tmp, &GlobalSMBSessionList) { 754 - ses = list_entry(tmp, struct cifsSesInfo, 755 - cifsSessionList); 756 - if (ses->server == server) 757 - ses->server = NULL; 748 + /* BB: This shouldn't be necessary, see above */ 749 
+ read_lock(&cifs_tcp_ses_lock); 750 + list_for_each(tmp, &server->smb_ses_list) { 751 + ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 752 + ses->server = NULL; 758 753 } 759 - write_unlock(&GlobalSMBSeslock); 754 + read_unlock(&cifs_tcp_ses_lock); 760 755 761 756 kfree(server->hostname); 762 757 task_to_wake = xchg(&server->tsk, NULL); ··· 1355 1352 return 0; 1356 1353 } 1357 1354 1355 + static struct TCP_Server_Info * 1356 + cifs_find_tcp_session(struct sockaddr *addr) 1357 + { 1358 + struct list_head *tmp; 1359 + struct TCP_Server_Info *server; 1360 + struct sockaddr_in *addr4 = (struct sockaddr_in *) addr; 1361 + struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) addr; 1362 + 1363 + write_lock(&cifs_tcp_ses_lock); 1364 + list_for_each(tmp, &cifs_tcp_ses_list) { 1365 + server = list_entry(tmp, struct TCP_Server_Info, 1366 + tcp_ses_list); 1367 + /* 1368 + * the demux thread can exit on its own while still in CifsNew 1369 + * so don't accept any sockets in that state. Since the 1370 + * tcpStatus never changes back to CifsNew it's safe to check 1371 + * for this without a lock. 
1372 + */ 1373 + if (server->tcpStatus == CifsNew) 1374 + continue; 1375 + 1376 + if (addr->sa_family == AF_INET && 1377 + (addr4->sin_addr.s_addr != 1378 + server->addr.sockAddr.sin_addr.s_addr)) 1379 + continue; 1380 + else if (addr->sa_family == AF_INET6 && 1381 + memcmp(&server->addr.sockAddr6.sin6_addr, 1382 + &addr6->sin6_addr, sizeof(addr6->sin6_addr))) 1383 + continue; 1384 + 1385 + ++server->srv_count; 1386 + write_unlock(&cifs_tcp_ses_lock); 1387 + cFYI(1, ("Existing tcp session with server found")); 1388 + return server; 1389 + } 1390 + write_unlock(&cifs_tcp_ses_lock); 1391 + return NULL; 1392 + } 1393 + 1394 + static void 1395 + cifs_put_tcp_session(struct TCP_Server_Info *server) 1396 + { 1397 + struct task_struct *task; 1398 + 1399 + write_lock(&cifs_tcp_ses_lock); 1400 + if (--server->srv_count > 0) { 1401 + write_unlock(&cifs_tcp_ses_lock); 1402 + return; 1403 + } 1404 + 1405 + list_del_init(&server->tcp_ses_list); 1406 + write_unlock(&cifs_tcp_ses_lock); 1407 + 1408 + spin_lock(&GlobalMid_Lock); 1409 + server->tcpStatus = CifsExiting; 1410 + spin_unlock(&GlobalMid_Lock); 1411 + 1412 + task = xchg(&server->tsk, NULL); 1413 + if (task) 1414 + force_sig(SIGKILL, task); 1415 + } 1416 + 1358 1417 static struct cifsSesInfo * 1359 - cifs_find_tcp_session(struct in_addr *target_ip_addr, 1360 - struct in6_addr *target_ip6_addr, 1361 - char *userName, struct TCP_Server_Info **psrvTcp) 1418 + cifs_find_smb_ses(struct TCP_Server_Info *server, char *username) 1362 1419 { 1363 1420 struct list_head *tmp; 1364 1421 struct cifsSesInfo *ses; 1365 1422 1366 - *psrvTcp = NULL; 1367 - 1368 - read_lock(&GlobalSMBSeslock); 1369 - list_for_each(tmp, &GlobalSMBSessionList) { 1370 - ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList); 1371 - if (!ses->server) 1423 + write_lock(&cifs_tcp_ses_lock); 1424 + list_for_each(tmp, &server->smb_ses_list) { 1425 + ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 1426 + if (strncmp(ses->userName, username, 
MAX_USERNAME_SIZE)) 1372 1427 continue; 1373 1428 1374 - if (target_ip_addr && 1375 - ses->server->addr.sockAddr.sin_addr.s_addr != target_ip_addr->s_addr) 1376 - continue; 1377 - else if (target_ip6_addr && 1378 - memcmp(&ses->server->addr.sockAddr6.sin6_addr, 1379 - target_ip6_addr, sizeof(*target_ip6_addr))) 1380 - continue; 1381 - /* BB lock server and tcp session; increment use count here?? */ 1382 - 1383 - /* found a match on the TCP session */ 1384 - *psrvTcp = ses->server; 1385 - 1386 - /* BB check if reconnection needed */ 1387 - if (strncmp(ses->userName, userName, MAX_USERNAME_SIZE) == 0) { 1388 - read_unlock(&GlobalSMBSeslock); 1389 - /* Found exact match on both TCP and 1390 - SMB sessions */ 1391 - return ses; 1392 - } 1393 - /* else tcp and smb sessions need reconnection */ 1429 + ++ses->ses_count; 1430 + write_unlock(&cifs_tcp_ses_lock); 1431 + return ses; 1394 1432 } 1395 - read_unlock(&GlobalSMBSeslock); 1396 - 1433 + write_unlock(&cifs_tcp_ses_lock); 1397 1434 return NULL; 1398 1435 } 1399 1436 1437 + static void 1438 + cifs_put_smb_ses(struct cifsSesInfo *ses) 1439 + { 1440 + int xid; 1441 + struct TCP_Server_Info *server = ses->server; 1442 + 1443 + write_lock(&cifs_tcp_ses_lock); 1444 + if (--ses->ses_count > 0) { 1445 + write_unlock(&cifs_tcp_ses_lock); 1446 + return; 1447 + } 1448 + 1449 + list_del_init(&ses->smb_ses_list); 1450 + write_unlock(&cifs_tcp_ses_lock); 1451 + 1452 + if (ses->status == CifsGood) { 1453 + xid = GetXid(); 1454 + CIFSSMBLogoff(xid, ses); 1455 + _FreeXid(xid); 1456 + } 1457 + sesInfoFree(ses); 1458 + cifs_put_tcp_session(server); 1459 + } 1460 + 1400 1461 static struct cifsTconInfo * 1401 - find_unc(__be32 new_target_ip_addr, char *uncName, char *userName) 1462 + cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) 1402 1463 { 1403 1464 struct list_head *tmp; 1404 1465 struct cifsTconInfo *tcon; 1405 - __be32 old_ip; 1406 1466 1407 - read_lock(&GlobalSMBSeslock); 1408 - 1409 - list_for_each(tmp, 
&GlobalTreeConnectionList) { 1410 - cFYI(1, ("Next tcon")); 1411 - tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 1412 - if (!tcon->ses || !tcon->ses->server) 1467 + write_lock(&cifs_tcp_ses_lock); 1468 + list_for_each(tmp, &ses->tcon_list) { 1469 + tcon = list_entry(tmp, struct cifsTconInfo, tcon_list); 1470 + if (tcon->tidStatus == CifsExiting) 1471 + continue; 1472 + if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE)) 1413 1473 continue; 1414 1474 1415 - old_ip = tcon->ses->server->addr.sockAddr.sin_addr.s_addr; 1416 - cFYI(1, ("old ip addr: %x == new ip %x ?", 1417 - old_ip, new_target_ip_addr)); 1418 - 1419 - if (old_ip != new_target_ip_addr) 1420 - continue; 1421 - 1422 - /* BB lock tcon, server, tcp session and increment use count? */ 1423 - /* found a match on the TCP session */ 1424 - /* BB check if reconnection needed */ 1425 - cFYI(1, ("IP match, old UNC: %s new: %s", 1426 - tcon->treeName, uncName)); 1427 - 1428 - if (strncmp(tcon->treeName, uncName, MAX_TREE_SIZE)) 1429 - continue; 1430 - 1431 - cFYI(1, ("and old usr: %s new: %s", 1432 - tcon->treeName, uncName)); 1433 - 1434 - if (strncmp(tcon->ses->userName, userName, MAX_USERNAME_SIZE)) 1435 - continue; 1436 - 1437 - /* matched smb session (user name) */ 1438 - read_unlock(&GlobalSMBSeslock); 1475 + ++tcon->tc_count; 1476 + write_unlock(&cifs_tcp_ses_lock); 1439 1477 return tcon; 1440 1478 } 1441 - 1442 - read_unlock(&GlobalSMBSeslock); 1479 + write_unlock(&cifs_tcp_ses_lock); 1443 1480 return NULL; 1481 + } 1482 + 1483 + static void 1484 + cifs_put_tcon(struct cifsTconInfo *tcon) 1485 + { 1486 + int xid; 1487 + struct cifsSesInfo *ses = tcon->ses; 1488 + 1489 + write_lock(&cifs_tcp_ses_lock); 1490 + if (--tcon->tc_count > 0) { 1491 + write_unlock(&cifs_tcp_ses_lock); 1492 + return; 1493 + } 1494 + 1495 + list_del_init(&tcon->tcon_list); 1496 + write_unlock(&cifs_tcp_ses_lock); 1497 + 1498 + xid = GetXid(); 1499 + CIFSSMBTDis(xid, tcon); 1500 + _FreeXid(xid); 1501 + 1502 + 
DeleteTconOplockQEntries(tcon); 1503 + tconInfoFree(tcon); 1504 + cifs_put_smb_ses(ses); 1444 1505 } 1445 1506 1446 1507 int ··· 1943 1876 } 1944 1877 } 1945 1878 1946 - static void 1947 - kill_cifsd(struct TCP_Server_Info *server) 1879 + static void setup_cifs_sb(struct smb_vol *pvolume_info, 1880 + struct cifs_sb_info *cifs_sb) 1948 1881 { 1949 - struct task_struct *task; 1882 + if (pvolume_info->rsize > CIFSMaxBufSize) { 1883 + cERROR(1, ("rsize %d too large, using MaxBufSize", 1884 + pvolume_info->rsize)); 1885 + cifs_sb->rsize = CIFSMaxBufSize; 1886 + } else if ((pvolume_info->rsize) && 1887 + (pvolume_info->rsize <= CIFSMaxBufSize)) 1888 + cifs_sb->rsize = pvolume_info->rsize; 1889 + else /* default */ 1890 + cifs_sb->rsize = CIFSMaxBufSize; 1950 1891 1951 - task = xchg(&server->tsk, NULL); 1952 - if (task) 1953 - force_sig(SIGKILL, task); 1892 + if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) { 1893 + cERROR(1, ("wsize %d too large, using 4096 instead", 1894 + pvolume_info->wsize)); 1895 + cifs_sb->wsize = 4096; 1896 + } else if (pvolume_info->wsize) 1897 + cifs_sb->wsize = pvolume_info->wsize; 1898 + else 1899 + cifs_sb->wsize = min_t(const int, 1900 + PAGEVEC_SIZE * PAGE_CACHE_SIZE, 1901 + 127*1024); 1902 + /* old default of CIFSMaxBufSize was too small now 1903 + that SMB Write2 can send multiple pages in kvec. 
1904 + RFC1001 does not describe what happens when frame 1905 + bigger than 128K is sent so use that as max in 1906 + conjunction with 52K kvec constraint on arch with 4K 1907 + page size */ 1908 + 1909 + if (cifs_sb->rsize < 2048) { 1910 + cifs_sb->rsize = 2048; 1911 + /* Windows ME may prefer this */ 1912 + cFYI(1, ("readsize set to minimum: 2048")); 1913 + } 1914 + /* calculate prepath */ 1915 + cifs_sb->prepath = pvolume_info->prepath; 1916 + if (cifs_sb->prepath) { 1917 + cifs_sb->prepathlen = strlen(cifs_sb->prepath); 1918 + /* we can not convert the / to \ in the path 1919 + separators in the prefixpath yet because we do not 1920 + know (until reset_cifs_unix_caps is called later) 1921 + whether POSIX PATH CAP is available. We normalize 1922 + the / to \ after reset_cifs_unix_caps is called */ 1923 + pvolume_info->prepath = NULL; 1924 + } else 1925 + cifs_sb->prepathlen = 0; 1926 + cifs_sb->mnt_uid = pvolume_info->linux_uid; 1927 + cifs_sb->mnt_gid = pvolume_info->linux_gid; 1928 + cifs_sb->mnt_file_mode = pvolume_info->file_mode; 1929 + cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; 1930 + cFYI(1, ("file mode: 0x%x dir mode: 0x%x", 1931 + cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode)); 1932 + 1933 + if (pvolume_info->noperm) 1934 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; 1935 + if (pvolume_info->setuids) 1936 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; 1937 + if (pvolume_info->server_ino) 1938 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; 1939 + if (pvolume_info->remap) 1940 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; 1941 + if (pvolume_info->no_xattr) 1942 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; 1943 + if (pvolume_info->sfu_emul) 1944 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; 1945 + if (pvolume_info->nobrl) 1946 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; 1947 + if (pvolume_info->cifs_acl) 1948 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; 1949 + if (pvolume_info->override_uid) 1950 + 
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; 1951 + if (pvolume_info->override_gid) 1952 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; 1953 + if (pvolume_info->dynperm) 1954 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; 1955 + if (pvolume_info->direct_io) { 1956 + cFYI(1, ("mounting share using direct i/o")); 1957 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; 1958 + } 1959 + 1960 + if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) 1961 + cERROR(1, ("mount option dynperm ignored if cifsacl " 1962 + "mount option supported")); 1954 1963 } 1955 1964 1956 1965 int ··· 2035 1892 { 2036 1893 int rc = 0; 2037 1894 int xid; 2038 - int address_type = AF_INET; 2039 1895 struct socket *csocket = NULL; 2040 - struct sockaddr_in sin_server; 2041 - struct sockaddr_in6 sin_server6; 1896 + struct sockaddr addr; 1897 + struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr; 1898 + struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr; 2042 1899 struct smb_vol volume_info; 2043 1900 struct cifsSesInfo *pSesInfo = NULL; 2044 - struct cifsSesInfo *existingCifsSes = NULL; 2045 1901 struct cifsTconInfo *tcon = NULL; 2046 1902 struct TCP_Server_Info *srvTcp = NULL; 2047 1903 ··· 2048 1906 2049 1907 /* cFYI(1, ("Entering cifs_mount. 
Xid: %d with: %s", xid, mount_data)); */ 2050 1908 1909 + memset(&addr, 0, sizeof(struct sockaddr)); 2051 1910 memset(&volume_info, 0, sizeof(struct smb_vol)); 2052 1911 if (cifs_parse_mount_options(mount_data, devname, &volume_info)) { 2053 1912 rc = -EINVAL; ··· 2071 1928 2072 1929 if (volume_info.UNCip && volume_info.UNC) { 2073 1930 rc = cifs_inet_pton(AF_INET, volume_info.UNCip, 2074 - &sin_server.sin_addr.s_addr); 1931 + &sin_server->sin_addr.s_addr); 2075 1932 2076 1933 if (rc <= 0) { 2077 1934 /* not ipv4 address, try ipv6 */ 2078 1935 rc = cifs_inet_pton(AF_INET6, volume_info.UNCip, 2079 - &sin_server6.sin6_addr.in6_u); 1936 + &sin_server6->sin6_addr.in6_u); 2080 1937 if (rc > 0) 2081 - address_type = AF_INET6; 1938 + addr.sa_family = AF_INET6; 2082 1939 } else { 2083 - address_type = AF_INET; 1940 + addr.sa_family = AF_INET; 2084 1941 } 2085 1942 2086 1943 if (rc <= 0) { ··· 2120 1977 } 2121 1978 } 2122 1979 2123 - if (address_type == AF_INET) 2124 - existingCifsSes = cifs_find_tcp_session(&sin_server.sin_addr, 2125 - NULL /* no ipv6 addr */, 2126 - volume_info.username, &srvTcp); 2127 - else if (address_type == AF_INET6) { 2128 - cFYI(1, ("looking for ipv6 address")); 2129 - existingCifsSes = cifs_find_tcp_session(NULL /* no ipv4 addr */, 2130 - &sin_server6.sin6_addr, 2131 - volume_info.username, &srvTcp); 2132 - } else { 2133 - rc = -EINVAL; 2134 - goto out; 2135 - } 2136 - 2137 - if (srvTcp) { 2138 - cFYI(1, ("Existing tcp session with server found")); 2139 - } else { /* create socket */ 2140 - if (volume_info.port) 2141 - sin_server.sin_port = htons(volume_info.port); 2142 - else 2143 - sin_server.sin_port = 0; 2144 - if (address_type == AF_INET6) { 1980 + srvTcp = cifs_find_tcp_session(&addr); 1981 + if (!srvTcp) { /* create socket */ 1982 + if (addr.sa_family == AF_INET6) { 2145 1983 cFYI(1, ("attempting ipv6 connect")); 2146 1984 /* BB should we allow ipv6 on port 139? 
*/ 2147 1985 /* other OS never observed in Wild doing 139 with v6 */ 2148 - rc = ipv6_connect(&sin_server6, &csocket, 1986 + sin_server6->sin6_port = htons(volume_info.port); 1987 + rc = ipv6_connect(sin_server6, &csocket, 2149 1988 volume_info.noblocksnd); 2150 - } else 2151 - rc = ipv4_connect(&sin_server, &csocket, 1989 + } else { 1990 + sin_server->sin_port = htons(volume_info.port); 1991 + rc = ipv4_connect(sin_server, &csocket, 2152 1992 volume_info.source_rfc1001_name, 2153 1993 volume_info.target_rfc1001_name, 2154 1994 volume_info.noblocksnd, 2155 1995 volume_info.noautotune); 1996 + } 2156 1997 if (rc < 0) { 2157 - cERROR(1, ("Error connecting to IPv4 socket. " 1998 + cERROR(1, ("Error connecting to socket. " 2158 1999 "Aborting operation")); 2159 2000 if (csocket != NULL) 2160 2001 sock_release(csocket); ··· 2153 2026 } else { 2154 2027 srvTcp->noblocksnd = volume_info.noblocksnd; 2155 2028 srvTcp->noautotune = volume_info.noautotune; 2156 - memcpy(&srvTcp->addr.sockAddr, &sin_server, 2157 - sizeof(struct sockaddr_in)); 2029 + if (addr.sa_family == AF_INET6) 2030 + memcpy(&srvTcp->addr.sockAddr6, sin_server6, 2031 + sizeof(struct sockaddr_in6)); 2032 + else 2033 + memcpy(&srvTcp->addr.sockAddr, sin_server, 2034 + sizeof(struct sockaddr_in)); 2158 2035 atomic_set(&srvTcp->inFlight, 0); 2159 2036 /* BB Add code for ipv6 case too */ 2160 2037 srvTcp->ssocket = csocket; 2161 - srvTcp->protocolType = IPV4; 2162 2038 srvTcp->hostname = extract_hostname(volume_info.UNC); 2163 2039 if (IS_ERR(srvTcp->hostname)) { 2164 2040 rc = PTR_ERR(srvTcp->hostname); ··· 2191 2061 memcpy(srvTcp->server_RFC1001_name, 2192 2062 volume_info.target_rfc1001_name, 16); 2193 2063 srvTcp->sequence_number = 0; 2064 + INIT_LIST_HEAD(&srvTcp->tcp_ses_list); 2065 + INIT_LIST_HEAD(&srvTcp->smb_ses_list); 2066 + ++srvTcp->srv_count; 2067 + write_lock(&cifs_tcp_ses_lock); 2068 + list_add(&srvTcp->tcp_ses_list, 2069 + &cifs_tcp_ses_list); 2070 + write_unlock(&cifs_tcp_ses_lock); 2194 2071 } 
2195 2072 } 2196 2073 2197 - if (existingCifsSes) { 2198 - pSesInfo = existingCifsSes; 2074 + pSesInfo = cifs_find_smb_ses(srvTcp, volume_info.username); 2075 + if (pSesInfo) { 2199 2076 cFYI(1, ("Existing smb sess found (status=%d)", 2200 2077 pSesInfo->status)); 2078 + /* 2079 + * The existing SMB session already has a reference to srvTcp, 2080 + * so we can put back the extra one we got before 2081 + */ 2082 + cifs_put_tcp_session(srvTcp); 2083 + 2201 2084 down(&pSesInfo->sesSem); 2202 - if (pSesInfo->status == CifsNeedReconnect) { 2085 + if (pSesInfo->need_reconnect) { 2203 2086 cFYI(1, ("Session needs reconnect")); 2204 2087 rc = cifs_setup_session(xid, pSesInfo, 2205 2088 cifs_sb->local_nls); ··· 2221 2078 } else if (!rc) { 2222 2079 cFYI(1, ("Existing smb sess not found")); 2223 2080 pSesInfo = sesInfoAlloc(); 2224 - if (pSesInfo == NULL) 2081 + if (pSesInfo == NULL) { 2225 2082 rc = -ENOMEM; 2226 - else { 2227 - pSesInfo->server = srvTcp; 2228 - sprintf(pSesInfo->serverName, "%u.%u.%u.%u", 2229 - NIPQUAD(sin_server.sin_addr.s_addr)); 2083 + goto mount_fail_check; 2230 2084 } 2231 2085 2232 - if (!rc) { 2233 - /* volume_info.password freed at unmount */ 2234 - if (volume_info.password) { 2235 - pSesInfo->password = volume_info.password; 2236 - /* set to NULL to prevent freeing on exit */ 2237 - volume_info.password = NULL; 2238 - } 2239 - if (volume_info.username) 2240 - strncpy(pSesInfo->userName, 2241 - volume_info.username, 2242 - MAX_USERNAME_SIZE); 2243 - if (volume_info.domainname) { 2244 - int len = strlen(volume_info.domainname); 2245 - pSesInfo->domainName = 2246 - kmalloc(len + 1, GFP_KERNEL); 2247 - if (pSesInfo->domainName) 2248 - strcpy(pSesInfo->domainName, 2249 - volume_info.domainname); 2250 - } 2251 - pSesInfo->linux_uid = volume_info.linux_uid; 2252 - pSesInfo->overrideSecFlg = volume_info.secFlg; 2253 - down(&pSesInfo->sesSem); 2254 - /* BB FIXME need to pass vol->secFlgs BB */ 2255 - rc = cifs_setup_session(xid, pSesInfo, 2256 - 
cifs_sb->local_nls); 2257 - up(&pSesInfo->sesSem); 2258 - if (!rc) 2259 - atomic_inc(&srvTcp->socketUseCount); 2086 + /* new SMB session uses our srvTcp ref */ 2087 + pSesInfo->server = srvTcp; 2088 + sprintf(pSesInfo->serverName, "%u.%u.%u.%u", 2089 + NIPQUAD(sin_server->sin_addr.s_addr)); 2090 + 2091 + write_lock(&cifs_tcp_ses_lock); 2092 + list_add(&pSesInfo->smb_ses_list, &srvTcp->smb_ses_list); 2093 + write_unlock(&cifs_tcp_ses_lock); 2094 + 2095 + /* volume_info.password freed at unmount */ 2096 + if (volume_info.password) { 2097 + pSesInfo->password = volume_info.password; 2098 + /* set to NULL to prevent freeing on exit */ 2099 + volume_info.password = NULL; 2260 2100 } 2101 + if (volume_info.username) 2102 + strncpy(pSesInfo->userName, volume_info.username, 2103 + MAX_USERNAME_SIZE); 2104 + if (volume_info.domainname) { 2105 + int len = strlen(volume_info.domainname); 2106 + pSesInfo->domainName = kmalloc(len + 1, GFP_KERNEL); 2107 + if (pSesInfo->domainName) 2108 + strcpy(pSesInfo->domainName, 2109 + volume_info.domainname); 2110 + } 2111 + pSesInfo->linux_uid = volume_info.linux_uid; 2112 + pSesInfo->overrideSecFlg = volume_info.secFlg; 2113 + down(&pSesInfo->sesSem); 2114 + 2115 + /* BB FIXME need to pass vol->secFlgs BB */ 2116 + rc = cifs_setup_session(xid, pSesInfo, 2117 + cifs_sb->local_nls); 2118 + up(&pSesInfo->sesSem); 2261 2119 } 2262 2120 2263 2121 /* search for existing tcon to this server share */ 2264 2122 if (!rc) { 2265 - if (volume_info.rsize > CIFSMaxBufSize) { 2266 - cERROR(1, ("rsize %d too large, using MaxBufSize", 2267 - volume_info.rsize)); 2268 - cifs_sb->rsize = CIFSMaxBufSize; 2269 - } else if ((volume_info.rsize) && 2270 - (volume_info.rsize <= CIFSMaxBufSize)) 2271 - cifs_sb->rsize = volume_info.rsize; 2272 - else /* default */ 2273 - cifs_sb->rsize = CIFSMaxBufSize; 2123 + setup_cifs_sb(&volume_info, cifs_sb); 2274 2124 2275 - if (volume_info.wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) { 2276 - cERROR(1, ("wsize %d too large, 
using 4096 instead", 2277 - volume_info.wsize)); 2278 - cifs_sb->wsize = 4096; 2279 - } else if (volume_info.wsize) 2280 - cifs_sb->wsize = volume_info.wsize; 2281 - else 2282 - cifs_sb->wsize = 2283 - min_t(const int, PAGEVEC_SIZE * PAGE_CACHE_SIZE, 2284 - 127*1024); 2285 - /* old default of CIFSMaxBufSize was too small now 2286 - that SMB Write2 can send multiple pages in kvec. 2287 - RFC1001 does not describe what happens when frame 2288 - bigger than 128K is sent so use that as max in 2289 - conjunction with 52K kvec constraint on arch with 4K 2290 - page size */ 2291 - 2292 - if (cifs_sb->rsize < 2048) { 2293 - cifs_sb->rsize = 2048; 2294 - /* Windows ME may prefer this */ 2295 - cFYI(1, ("readsize set to minimum: 2048")); 2296 - } 2297 - /* calculate prepath */ 2298 - cifs_sb->prepath = volume_info.prepath; 2299 - if (cifs_sb->prepath) { 2300 - cifs_sb->prepathlen = strlen(cifs_sb->prepath); 2301 - /* we can not convert the / to \ in the path 2302 - separators in the prefixpath yet because we do not 2303 - know (until reset_cifs_unix_caps is called later) 2304 - whether POSIX PATH CAP is available. 
We normalize 2305 - the / to \ after reset_cifs_unix_caps is called */ 2306 - volume_info.prepath = NULL; 2307 - } else 2308 - cifs_sb->prepathlen = 0; 2309 - cifs_sb->mnt_uid = volume_info.linux_uid; 2310 - cifs_sb->mnt_gid = volume_info.linux_gid; 2311 - cifs_sb->mnt_file_mode = volume_info.file_mode; 2312 - cifs_sb->mnt_dir_mode = volume_info.dir_mode; 2313 - cFYI(1, ("file mode: 0x%x dir mode: 0x%x", 2314 - cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode)); 2315 - 2316 - if (volume_info.noperm) 2317 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; 2318 - if (volume_info.setuids) 2319 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; 2320 - if (volume_info.server_ino) 2321 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; 2322 - if (volume_info.remap) 2323 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; 2324 - if (volume_info.no_xattr) 2325 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; 2326 - if (volume_info.sfu_emul) 2327 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; 2328 - if (volume_info.nobrl) 2329 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; 2330 - if (volume_info.cifs_acl) 2331 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; 2332 - if (volume_info.override_uid) 2333 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; 2334 - if (volume_info.override_gid) 2335 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; 2336 - if (volume_info.dynperm) 2337 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; 2338 - if (volume_info.direct_io) { 2339 - cFYI(1, ("mounting share using direct i/o")); 2340 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; 2341 - } 2342 - 2343 - if ((volume_info.cifs_acl) && (volume_info.dynperm)) 2344 - cERROR(1, ("mount option dynperm ignored if cifsacl " 2345 - "mount option supported")); 2346 - 2347 - tcon = 2348 - find_unc(sin_server.sin_addr.s_addr, volume_info.UNC, 2349 - volume_info.username); 2125 + tcon = cifs_find_tcon(pSesInfo, volume_info.UNC); 2350 2126 if (tcon) { 2351 2127 cFYI(1, ("Found match on UNC 
path")); 2352 - /* we can have only one retry value for a connection 2353 - to a share so for resources mounted more than once 2354 - to the same server share the last value passed in 2355 - for the retry flag is used */ 2356 - tcon->retry = volume_info.retry; 2357 - tcon->nocase = volume_info.nocase; 2358 - tcon->local_lease = volume_info.local_lease; 2128 + /* existing tcon already has a reference */ 2129 + cifs_put_smb_ses(pSesInfo); 2359 2130 if (tcon->seal != volume_info.seal) 2360 2131 cERROR(1, ("transport encryption setting " 2361 2132 "conflicts with existing tid")); 2362 2133 } else { 2363 2134 tcon = tconInfoAlloc(); 2364 - if (tcon == NULL) 2135 + if (tcon == NULL) { 2365 2136 rc = -ENOMEM; 2366 - else { 2367 - /* check for null share name ie connecting to 2368 - * dfs root */ 2137 + goto mount_fail_check; 2138 + } 2139 + tcon->ses = pSesInfo; 2369 2140 2370 - /* BB check if this works for exactly length 2371 - * three strings */ 2372 - if ((strchr(volume_info.UNC + 3, '\\') == NULL) 2373 - && (strchr(volume_info.UNC + 3, '/') == 2374 - NULL)) { 2375 - /* rc = connect_to_dfs_path(xid, pSesInfo, 2376 - "", cifs_sb->local_nls, 2377 - cifs_sb->mnt_cifs_flags & 2378 - CIFS_MOUNT_MAP_SPECIAL_CHR);*/ 2379 - cFYI(1, ("DFS root not supported")); 2380 - rc = -ENODEV; 2381 - goto out; 2382 - } else { 2383 - /* BB Do we need to wrap sesSem around 2384 - * this TCon call and Unix SetFS as 2385 - * we do on SessSetup and reconnect? 
*/ 2386 - rc = CIFSTCon(xid, pSesInfo, 2387 - volume_info.UNC, 2388 - tcon, cifs_sb->local_nls); 2389 - cFYI(1, ("CIFS Tcon rc = %d", rc)); 2390 - if (volume_info.nodfs) { 2391 - tcon->Flags &= 2392 - ~SMB_SHARE_IS_IN_DFS; 2393 - cFYI(1, ("DFS disabled (%d)", 2394 - tcon->Flags)); 2395 - } 2396 - } 2397 - if (!rc) { 2398 - atomic_inc(&pSesInfo->inUse); 2399 - tcon->retry = volume_info.retry; 2400 - tcon->nocase = volume_info.nocase; 2401 - tcon->seal = volume_info.seal; 2141 + /* check for null share name ie connect to dfs root */ 2142 + if ((strchr(volume_info.UNC + 3, '\\') == NULL) 2143 + && (strchr(volume_info.UNC + 3, '/') == NULL)) { 2144 + /* rc = connect_to_dfs_path(...) */ 2145 + cFYI(1, ("DFS root not supported")); 2146 + rc = -ENODEV; 2147 + goto mount_fail_check; 2148 + } else { 2149 + /* BB Do we need to wrap sesSem around 2150 + * this TCon call and Unix SetFS as 2151 + * we do on SessSetup and reconnect? */ 2152 + rc = CIFSTCon(xid, pSesInfo, volume_info.UNC, 2153 + tcon, cifs_sb->local_nls); 2154 + cFYI(1, ("CIFS Tcon rc = %d", rc)); 2155 + if (volume_info.nodfs) { 2156 + tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; 2157 + cFYI(1, ("DFS disabled (%d)", 2158 + tcon->Flags)); 2402 2159 } 2403 2160 } 2161 + if (rc) 2162 + goto mount_fail_check; 2163 + tcon->seal = volume_info.seal; 2164 + write_lock(&cifs_tcp_ses_lock); 2165 + list_add(&tcon->tcon_list, &pSesInfo->tcon_list); 2166 + write_unlock(&cifs_tcp_ses_lock); 2404 2167 } 2168 + 2169 + /* we can have only one retry value for a connection 2170 + to a share so for resources mounted more than once 2171 + to the same server share the last value passed in 2172 + for the retry flag is used */ 2173 + tcon->retry = volume_info.retry; 2174 + tcon->nocase = volume_info.nocase; 2175 + tcon->local_lease = volume_info.local_lease; 2405 2176 } 2406 2177 if (pSesInfo) { 2407 2178 if (pSesInfo->capabilities & CAP_LARGE_FILES) { ··· 2327 2270 /* BB FIXME fix time_gran to be larger for LANMAN sessions */ 2328 2271 
sb->s_time_gran = 100; 2329 2272 2330 - /* on error free sesinfo and tcon struct if needed */ 2273 + mount_fail_check: 2274 + /* on error free sesinfo and tcon struct if needed */ 2331 2275 if (rc) { 2332 - /* if session setup failed, use count is zero but 2333 - we still need to free cifsd thread */ 2334 - if (atomic_read(&srvTcp->socketUseCount) == 0) { 2335 - spin_lock(&GlobalMid_Lock); 2336 - srvTcp->tcpStatus = CifsExiting; 2337 - spin_unlock(&GlobalMid_Lock); 2338 - kill_cifsd(srvTcp); 2339 - } 2340 - /* If find_unc succeeded then rc == 0 so we can not end */ 2341 - if (tcon) /* up accidently freeing someone elses tcon struct */ 2342 - tconInfoFree(tcon); 2343 - if (existingCifsSes == NULL) { 2344 - if (pSesInfo) { 2345 - if ((pSesInfo->server) && 2346 - (pSesInfo->status == CifsGood)) { 2347 - int temp_rc; 2348 - temp_rc = CIFSSMBLogoff(xid, pSesInfo); 2349 - /* if the socketUseCount is now zero */ 2350 - if ((temp_rc == -ESHUTDOWN) && 2351 - (pSesInfo->server)) 2352 - kill_cifsd(pSesInfo->server); 2353 - } else { 2354 - cFYI(1, ("No session or bad tcon")); 2355 - if (pSesInfo->server) { 2356 - spin_lock(&GlobalMid_Lock); 2357 - srvTcp->tcpStatus = CifsExiting; 2358 - spin_unlock(&GlobalMid_Lock); 2359 - kill_cifsd(pSesInfo->server); 2360 - } 2361 - } 2362 - sesInfoFree(pSesInfo); 2363 - /* pSesInfo = NULL; */ 2364 - } 2365 - } 2366 - } else { 2367 - atomic_inc(&tcon->useCount); 2368 - cifs_sb->tcon = tcon; 2369 - tcon->ses = pSesInfo; 2370 - 2371 - /* do not care if following two calls succeed - informational */ 2372 - if (!tcon->ipc) { 2373 - CIFSSMBQFSDeviceInfo(xid, tcon); 2374 - CIFSSMBQFSAttributeInfo(xid, tcon); 2375 - } 2376 - 2377 - /* tell server which Unix caps we support */ 2378 - if (tcon->ses->capabilities & CAP_UNIX) 2379 - /* reset of caps checks mount to see if unix extensions 2380 - disabled for just this mount */ 2381 - reset_cifs_unix_caps(xid, tcon, sb, &volume_info); 2276 + /* If find_unc succeeded then rc == 0 so we can not end */ 2277 
+ /* up accidently freeing someone elses tcon struct */ 2278 + if (tcon) 2279 + cifs_put_tcon(tcon); 2280 + else if (pSesInfo) 2281 + cifs_put_smb_ses(pSesInfo); 2382 2282 else 2383 - tcon->unix_ext = 0; /* server does not support them */ 2384 - 2385 - /* convert forward to back slashes in prepath here if needed */ 2386 - if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) 2387 - convert_delimiter(cifs_sb->prepath, 2388 - CIFS_DIR_SEP(cifs_sb)); 2389 - 2390 - if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { 2391 - cifs_sb->rsize = 1024 * 127; 2392 - cFYI(DBG2, 2393 - ("no very large read support, rsize now 127K")); 2394 - } 2395 - if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) 2396 - cifs_sb->wsize = min(cifs_sb->wsize, 2397 - (tcon->ses->server->maxBuf - 2398 - MAX_CIFS_HDR_SIZE)); 2399 - if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) 2400 - cifs_sb->rsize = min(cifs_sb->rsize, 2401 - (tcon->ses->server->maxBuf - 2402 - MAX_CIFS_HDR_SIZE)); 2283 + cifs_put_tcp_session(srvTcp); 2284 + goto out; 2403 2285 } 2286 + cifs_sb->tcon = tcon; 2287 + 2288 + /* do not care if following two calls succeed - informational */ 2289 + if (!tcon->ipc) { 2290 + CIFSSMBQFSDeviceInfo(xid, tcon); 2291 + CIFSSMBQFSAttributeInfo(xid, tcon); 2292 + } 2293 + 2294 + /* tell server which Unix caps we support */ 2295 + if (tcon->ses->capabilities & CAP_UNIX) 2296 + /* reset of caps checks mount to see if unix extensions 2297 + disabled for just this mount */ 2298 + reset_cifs_unix_caps(xid, tcon, sb, &volume_info); 2299 + else 2300 + tcon->unix_ext = 0; /* server does not support them */ 2301 + 2302 + /* convert forward to back slashes in prepath here if needed */ 2303 + if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) 2304 + convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); 2305 + 2306 + if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { 2307 + cifs_sb->rsize = 1024 * 127; 2308 + cFYI(DBG2, ("no very large read support, 
rsize now 127K")); 2309 + } 2310 + if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) 2311 + cifs_sb->wsize = min(cifs_sb->wsize, 2312 + (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); 2313 + if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) 2314 + cifs_sb->rsize = min(cifs_sb->rsize, 2315 + (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); 2404 2316 2405 2317 /* volume_info.password is freed above when existing session found 2406 2318 (in which case it is not needed anymore) but when new sesion is created ··· 3539 3513 /* above now done in SendReceive */ 3540 3514 if ((rc == 0) && (tcon != NULL)) { 3541 3515 tcon->tidStatus = CifsGood; 3516 + tcon->need_reconnect = false; 3542 3517 tcon->tid = smb_buffer_response->Tid; 3543 3518 bcc_ptr = pByteArea(smb_buffer_response); 3544 3519 length = strnlen(bcc_ptr, BCC(smb_buffer_response) - 2); ··· 3611 3584 cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) 3612 3585 { 3613 3586 int rc = 0; 3614 - int xid; 3615 - struct cifsSesInfo *ses = NULL; 3616 3587 char *tmp; 3617 3588 3618 - xid = GetXid(); 3619 - 3620 - if (cifs_sb->tcon) { 3621 - ses = cifs_sb->tcon->ses; /* save ptr to ses before delete tcon!*/ 3622 - rc = CIFSSMBTDis(xid, cifs_sb->tcon); 3623 - if (rc == -EBUSY) { 3624 - FreeXid(xid); 3625 - return 0; 3626 - } 3627 - DeleteTconOplockQEntries(cifs_sb->tcon); 3628 - tconInfoFree(cifs_sb->tcon); 3629 - if ((ses) && (ses->server)) { 3630 - /* save off task so we do not refer to ses later */ 3631 - cFYI(1, ("About to do SMBLogoff ")); 3632 - rc = CIFSSMBLogoff(xid, ses); 3633 - if (rc == -EBUSY) { 3634 - FreeXid(xid); 3635 - return 0; 3636 - } else if (rc == -ESHUTDOWN) { 3637 - cFYI(1, ("Waking up socket by sending signal")); 3638 - if (ses->server) 3639 - kill_cifsd(ses->server); 3640 - rc = 0; 3641 - } /* else - we have an smb session 3642 - left on this socket do not kill cifsd */ 3643 - } else 3644 - cFYI(1, ("No session or bad tcon")); 3645 - } 3589 + if (cifs_sb->tcon) 3590 + 
cifs_put_tcon(cifs_sb->tcon); 3646 3591 3647 3592 cifs_sb->tcon = NULL; 3648 3593 tmp = cifs_sb->prepath; 3649 3594 cifs_sb->prepathlen = 0; 3650 3595 cifs_sb->prepath = NULL; 3651 3596 kfree(tmp); 3652 - if (ses) 3653 - sesInfoFree(ses); 3654 3597 3655 - FreeXid(xid); 3656 3598 return rc; 3657 3599 } 3658 3600 ··· 3737 3741 cFYI(1, ("CIFS Session Established successfully")); 3738 3742 spin_lock(&GlobalMid_Lock); 3739 3743 pSesInfo->status = CifsGood; 3744 + pSesInfo->need_reconnect = false; 3740 3745 spin_unlock(&GlobalMid_Lock); 3741 3746 } 3742 3747
+19 -9
fs/cifs/file.c
··· 488 488 pTcon = cifs_sb->tcon; 489 489 if (pSMBFile) { 490 490 struct cifsLockInfo *li, *tmp; 491 - 491 + write_lock(&GlobalSMBSeslock); 492 492 pSMBFile->closePend = true; 493 493 if (pTcon) { 494 494 /* no sense reconnecting to close a file that is 495 495 already closed */ 496 - if (pTcon->tidStatus != CifsNeedReconnect) { 496 + if (!pTcon->need_reconnect) { 497 + write_unlock(&GlobalSMBSeslock); 497 498 timeout = 2; 498 499 while ((atomic_read(&pSMBFile->wrtPending) != 0) 499 500 && (timeout <= 2048)) { ··· 511 510 timeout *= 4; 512 511 } 513 512 if (atomic_read(&pSMBFile->wrtPending)) 514 - cERROR(1, 515 - ("close with pending writes")); 516 - rc = CIFSSMBClose(xid, pTcon, 513 + cERROR(1, ("close with pending write")); 514 + if (!pTcon->need_reconnect && 515 + !pSMBFile->invalidHandle) 516 + rc = CIFSSMBClose(xid, pTcon, 517 517 pSMBFile->netfid); 518 - } 519 - } 518 + } else 519 + write_unlock(&GlobalSMBSeslock); 520 + } else 521 + write_unlock(&GlobalSMBSeslock); 520 522 521 523 /* Delete any outstanding lock records. 522 524 We'll lose them when the file is closed anyway. 
*/ ··· 591 587 pTcon = cifs_sb->tcon; 592 588 593 589 cFYI(1, ("Freeing private data in close dir")); 590 + write_lock(&GlobalSMBSeslock); 594 591 if (!pCFileStruct->srch_inf.endOfSearch && 595 592 !pCFileStruct->invalidHandle) { 596 593 pCFileStruct->invalidHandle = true; 594 + write_unlock(&GlobalSMBSeslock); 597 595 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid); 598 596 cFYI(1, ("Closing uncompleted readdir with rc %d", 599 597 rc)); 600 598 /* not much we can do if it fails anyway, ignore rc */ 601 599 rc = 0; 602 - } 600 + } else 601 + write_unlock(&GlobalSMBSeslock); 603 602 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start; 604 603 if (ptmp) { 605 604 cFYI(1, ("closedir free smb buf in srch struct")); ··· 1411 1404 if ((wbc->nr_to_write -= n_iov) <= 0) 1412 1405 done = 1; 1413 1406 index = next; 1414 - } 1407 + } else 1408 + /* Need to re-find the pages we skipped */ 1409 + index = pvec.pages[0]->index + 1; 1410 + 1415 1411 pagevec_release(&pvec); 1416 1412 } 1417 1413 if (!scanned && !done) {
+44 -49
fs/cifs/misc.c
··· 75 75 76 76 ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL); 77 77 if (ret_buf) { 78 - write_lock(&GlobalSMBSeslock); 79 78 atomic_inc(&sesInfoAllocCount); 80 79 ret_buf->status = CifsNew; 81 - list_add(&ret_buf->cifsSessionList, &GlobalSMBSessionList); 80 + ++ret_buf->ses_count; 81 + INIT_LIST_HEAD(&ret_buf->smb_ses_list); 82 + INIT_LIST_HEAD(&ret_buf->tcon_list); 82 83 init_MUTEX(&ret_buf->sesSem); 83 - write_unlock(&GlobalSMBSeslock); 84 84 } 85 85 return ret_buf; 86 86 } ··· 93 93 return; 94 94 } 95 95 96 - write_lock(&GlobalSMBSeslock); 97 96 atomic_dec(&sesInfoAllocCount); 98 - list_del(&buf_to_free->cifsSessionList); 99 - write_unlock(&GlobalSMBSeslock); 100 97 kfree(buf_to_free->serverOS); 101 98 kfree(buf_to_free->serverDomain); 102 99 kfree(buf_to_free->serverNOS); ··· 108 111 struct cifsTconInfo *ret_buf; 109 112 ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL); 110 113 if (ret_buf) { 111 - write_lock(&GlobalSMBSeslock); 112 114 atomic_inc(&tconInfoAllocCount); 113 - list_add(&ret_buf->cifsConnectionList, 114 - &GlobalTreeConnectionList); 115 115 ret_buf->tidStatus = CifsNew; 116 + ++ret_buf->tc_count; 116 117 INIT_LIST_HEAD(&ret_buf->openFileList); 117 - init_MUTEX(&ret_buf->tconSem); 118 + INIT_LIST_HEAD(&ret_buf->tcon_list); 118 119 #ifdef CONFIG_CIFS_STATS 119 120 spin_lock_init(&ret_buf->stat_lock); 120 121 #endif 121 - write_unlock(&GlobalSMBSeslock); 122 122 } 123 123 return ret_buf; 124 124 } ··· 127 133 cFYI(1, ("Null buffer passed to tconInfoFree")); 128 134 return; 129 135 } 130 - write_lock(&GlobalSMBSeslock); 131 136 atomic_dec(&tconInfoAllocCount); 132 - list_del(&buf_to_free->cifsConnectionList); 133 - write_unlock(&GlobalSMBSeslock); 134 137 kfree(buf_to_free->nativeFileSystem); 135 138 kfree(buf_to_free); 136 139 } ··· 341 350 if (current->fsuid != treeCon->ses->linux_uid) { 342 351 cFYI(1, ("Multiuser mode and UID " 343 352 "did not match tcon uid")); 344 - read_lock(&GlobalSMBSeslock); 345 - 
list_for_each(temp_item, &GlobalSMBSessionList) { 346 - ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList); 353 + read_lock(&cifs_tcp_ses_lock); 354 + list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) { 355 + ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list); 347 356 if (ses->linux_uid == current->fsuid) { 348 357 if (ses->server == treeCon->ses->server) { 349 358 cFYI(1, ("found matching uid substitute right smb_uid")); ··· 355 364 } 356 365 } 357 366 } 358 - read_unlock(&GlobalSMBSeslock); 367 + read_unlock(&cifs_tcp_ses_lock); 359 368 } 360 369 } 361 370 } ··· 488 497 is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) 489 498 { 490 499 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; 491 - struct list_head *tmp; 492 - struct list_head *tmp1; 500 + struct list_head *tmp, *tmp1, *tmp2; 501 + struct cifsSesInfo *ses; 493 502 struct cifsTconInfo *tcon; 503 + struct cifsInodeInfo *pCifsInode; 494 504 struct cifsFileInfo *netfile; 495 505 496 506 cFYI(1, ("Checking for oplock break or dnotify response")); ··· 546 554 return false; 547 555 548 556 /* look up tcon based on tid & uid */ 549 - read_lock(&GlobalSMBSeslock); 550 - list_for_each(tmp, &GlobalTreeConnectionList) { 551 - tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 552 - if ((tcon->tid == buf->Tid) && (srv == tcon->ses->server)) { 557 + read_lock(&cifs_tcp_ses_lock); 558 + list_for_each(tmp, &srv->smb_ses_list) { 559 + ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); 560 + list_for_each(tmp1, &ses->tcon_list) { 561 + tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list); 562 + if (tcon->tid != buf->Tid) 563 + continue; 564 + 553 565 cifs_stats_inc(&tcon->num_oplock_brks); 554 - list_for_each(tmp1, &tcon->openFileList) { 555 - netfile = list_entry(tmp1, struct cifsFileInfo, 566 + write_lock(&GlobalSMBSeslock); 567 + list_for_each(tmp2, &tcon->openFileList) { 568 + netfile = list_entry(tmp2, struct 
cifsFileInfo, 556 569 tlist); 557 - if (pSMB->Fid == netfile->netfid) { 558 - struct cifsInodeInfo *pCifsInode; 559 - read_unlock(&GlobalSMBSeslock); 560 - cFYI(1, 561 - ("file id match, oplock break")); 562 - pCifsInode = 563 - CIFS_I(netfile->pInode); 564 - pCifsInode->clientCanCacheAll = false; 565 - if (pSMB->OplockLevel == 0) 566 - pCifsInode->clientCanCacheRead 567 - = false; 568 - pCifsInode->oplockPending = true; 569 - AllocOplockQEntry(netfile->pInode, 570 - netfile->netfid, 571 - tcon); 572 - cFYI(1, 573 - ("about to wake up oplock thread")); 574 - if (oplockThread) 575 - wake_up_process(oplockThread); 576 - return true; 577 - } 570 + if (pSMB->Fid != netfile->netfid) 571 + continue; 572 + 573 + write_unlock(&GlobalSMBSeslock); 574 + read_unlock(&cifs_tcp_ses_lock); 575 + cFYI(1, ("file id match, oplock break")); 576 + pCifsInode = CIFS_I(netfile->pInode); 577 + pCifsInode->clientCanCacheAll = false; 578 + if (pSMB->OplockLevel == 0) 579 + pCifsInode->clientCanCacheRead = false; 580 + pCifsInode->oplockPending = true; 581 + AllocOplockQEntry(netfile->pInode, 582 + netfile->netfid, tcon); 583 + cFYI(1, ("about to wake up oplock thread")); 584 + if (oplockThread) 585 + wake_up_process(oplockThread); 586 + 587 + return true; 578 588 } 579 - read_unlock(&GlobalSMBSeslock); 589 + write_unlock(&GlobalSMBSeslock); 590 + read_unlock(&cifs_tcp_ses_lock); 580 591 cFYI(1, ("No matching file for oplock break")); 581 592 return true; 582 593 } 583 594 } 584 - read_unlock(&GlobalSMBSeslock); 595 + read_unlock(&cifs_tcp_ses_lock); 585 596 cFYI(1, ("Can not process oplock break for non-existent connection")); 586 597 return true; 587 598 }
+4 -1
fs/cifs/readdir.c
··· 741 741 (index_to_find < first_entry_in_buffer)) { 742 742 /* close and restart search */ 743 743 cFYI(1, ("search backing up - close and restart search")); 744 + write_lock(&GlobalSMBSeslock); 744 745 if (!cifsFile->srch_inf.endOfSearch && 745 746 !cifsFile->invalidHandle) { 746 747 cifsFile->invalidHandle = true; 748 + write_unlock(&GlobalSMBSeslock); 747 749 CIFSFindClose(xid, pTcon, cifsFile->netfid); 748 - } 750 + } else 751 + write_unlock(&GlobalSMBSeslock); 749 752 if (cifsFile->srch_inf.ntwrk_buf_start) { 750 753 cFYI(1, ("freeing SMB ff cache buf on search rewind")); 751 754 if (cifsFile->srch_inf.smallBuf)
+14 -17
fs/ecryptfs/keystore.c
··· 1037 1037 decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok, 1038 1038 struct ecryptfs_crypt_stat *crypt_stat) 1039 1039 { 1040 - struct scatterlist dst_sg; 1041 - struct scatterlist src_sg; 1040 + struct scatterlist dst_sg[2]; 1041 + struct scatterlist src_sg[2]; 1042 1042 struct mutex *tfm_mutex; 1043 1043 struct blkcipher_desc desc = { 1044 1044 .flags = CRYPTO_TFM_REQ_MAY_SLEEP 1045 1045 }; 1046 1046 int rc = 0; 1047 - 1048 - sg_init_table(&dst_sg, 1); 1049 - sg_init_table(&src_sg, 1); 1050 1047 1051 1048 if (unlikely(ecryptfs_verbosity > 0)) { 1052 1049 ecryptfs_printk( ··· 1063 1066 } 1064 1067 rc = virt_to_scatterlist(auth_tok->session_key.encrypted_key, 1065 1068 auth_tok->session_key.encrypted_key_size, 1066 - &src_sg, 1); 1067 - if (rc != 1) { 1069 + src_sg, 2); 1070 + if (rc < 1 || rc > 2) { 1068 1071 printk(KERN_ERR "Internal error whilst attempting to convert " 1069 1072 "auth_tok->session_key.encrypted_key to scatterlist; " 1070 1073 "expected rc = 1; got rc = [%d]. 
" ··· 1076 1079 auth_tok->session_key.encrypted_key_size; 1077 1080 rc = virt_to_scatterlist(auth_tok->session_key.decrypted_key, 1078 1081 auth_tok->session_key.decrypted_key_size, 1079 - &dst_sg, 1); 1080 - if (rc != 1) { 1082 + dst_sg, 2); 1083 + if (rc < 1 || rc > 2) { 1081 1084 printk(KERN_ERR "Internal error whilst attempting to convert " 1082 1085 "auth_tok->session_key.decrypted_key to scatterlist; " 1083 1086 "expected rc = 1; got rc = [%d]\n", rc); ··· 1093 1096 rc = -EINVAL; 1094 1097 goto out; 1095 1098 } 1096 - rc = crypto_blkcipher_decrypt(&desc, &dst_sg, &src_sg, 1099 + rc = crypto_blkcipher_decrypt(&desc, dst_sg, src_sg, 1097 1100 auth_tok->session_key.encrypted_key_size); 1098 1101 mutex_unlock(tfm_mutex); 1099 1102 if (unlikely(rc)) { ··· 1536 1539 size_t i; 1537 1540 size_t encrypted_session_key_valid = 0; 1538 1541 char session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES]; 1539 - struct scatterlist dst_sg; 1540 - struct scatterlist src_sg; 1542 + struct scatterlist dst_sg[2]; 1543 + struct scatterlist src_sg[2]; 1541 1544 struct mutex *tfm_mutex = NULL; 1542 1545 u8 cipher_code; 1543 1546 size_t packet_size_length; ··· 1616 1619 ecryptfs_dump_hex(session_key_encryption_key, 16); 1617 1620 } 1618 1621 rc = virt_to_scatterlist(crypt_stat->key, key_rec->enc_key_size, 1619 - &src_sg, 1); 1620 - if (rc != 1) { 1622 + src_sg, 2); 1623 + if (rc < 1 || rc > 2) { 1621 1624 ecryptfs_printk(KERN_ERR, "Error generating scatterlist " 1622 1625 "for crypt_stat session key; expected rc = 1; " 1623 1626 "got rc = [%d]. key_rec->enc_key_size = [%d]\n", ··· 1626 1629 goto out; 1627 1630 } 1628 1631 rc = virt_to_scatterlist(key_rec->enc_key, key_rec->enc_key_size, 1629 - &dst_sg, 1); 1630 - if (rc != 1) { 1632 + dst_sg, 2); 1633 + if (rc < 1 || rc > 2) { 1631 1634 ecryptfs_printk(KERN_ERR, "Error generating scatterlist " 1632 1635 "for crypt_stat encrypted session key; " 1633 1636 "expected rc = 1; got rc = [%d]. 
" ··· 1648 1651 rc = 0; 1649 1652 ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes of the key\n", 1650 1653 crypt_stat->key_size); 1651 - rc = crypto_blkcipher_encrypt(&desc, &dst_sg, &src_sg, 1654 + rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg, 1652 1655 (*key_rec).enc_key_size); 1653 1656 mutex_unlock(tfm_mutex); 1654 1657 if (rc) {
+1 -1
fs/hostfs/hostfs.h
··· 81 81 extern int do_mknod(const char *file, int mode, unsigned int major, 82 82 unsigned int minor); 83 83 extern int link_file(const char *from, const char *to); 84 - extern int do_readlink(char *file, char *buf, int size); 84 + extern int hostfs_do_readlink(char *file, char *buf, int size); 85 85 extern int rename_file(char *from, char *to); 86 86 extern int do_statfs(char *root, long *bsize_out, long long *blocks_out, 87 87 long long *bfree_out, long long *bavail_out,
+2 -2
fs/hostfs/hostfs_kern.c
··· 168 168 if (name == NULL) 169 169 goto out; 170 170 171 - n = do_readlink(link, name, len); 171 + n = hostfs_do_readlink(link, name, len); 172 172 if (n < len) 173 173 break; 174 174 len *= 2; ··· 943 943 name = inode_name(page->mapping->host, 0); 944 944 if (name == NULL) 945 945 return -ENOMEM; 946 - err = do_readlink(name, buffer, PAGE_CACHE_SIZE); 946 + err = hostfs_do_readlink(name, buffer, PAGE_CACHE_SIZE); 947 947 kfree(name); 948 948 if (err == PAGE_CACHE_SIZE) 949 949 err = -E2BIG;
+1 -1
fs/hostfs/hostfs_user.c
··· 377 377 return 0; 378 378 } 379 379 380 - int do_readlink(char *file, char *buf, int size) 380 + int hostfs_do_readlink(char *file, char *buf, int size) 381 381 { 382 382 int n; 383 383
+1 -1
fs/namei.c
··· 1378 1378 if (IS_APPEND(dir)) 1379 1379 return -EPERM; 1380 1380 if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)|| 1381 - IS_IMMUTABLE(victim->d_inode)) 1381 + IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) 1382 1382 return -EPERM; 1383 1383 if (isdir) { 1384 1384 if (!S_ISDIR(victim->d_inode->i_mode))
+17 -14
fs/partitions/check.c
··· 348 348 static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, 349 349 whole_disk_show, NULL); 350 350 351 - int add_partition(struct gendisk *disk, int partno, 352 - sector_t start, sector_t len, int flags) 351 + struct hd_struct *add_partition(struct gendisk *disk, int partno, 352 + sector_t start, sector_t len, int flags) 353 353 { 354 354 struct hd_struct *p; 355 355 dev_t devt = MKDEV(0, 0); ··· 361 361 362 362 err = disk_expand_part_tbl(disk, partno); 363 363 if (err) 364 - return err; 364 + return ERR_PTR(err); 365 365 ptbl = disk->part_tbl; 366 366 367 367 if (ptbl->part[partno]) 368 - return -EBUSY; 368 + return ERR_PTR(-EBUSY); 369 369 370 370 p = kzalloc(sizeof(*p), GFP_KERNEL); 371 371 if (!p) 372 - return -ENOMEM; 372 + return ERR_PTR(-EBUSY); 373 373 374 374 if (!init_part_stats(p)) { 375 375 err = -ENOMEM; ··· 395 395 396 396 err = blk_alloc_devt(p, &devt); 397 397 if (err) 398 - goto out_free; 398 + goto out_free_stats; 399 399 pdev->devt = devt; 400 400 401 401 /* delay uevent until 'holders' subdir is created */ ··· 424 424 if (!ddev->uevent_suppress) 425 425 kobject_uevent(&pdev->kobj, KOBJ_ADD); 426 426 427 - return 0; 427 + return p; 428 428 429 + out_free_stats: 430 + free_part_stats(p); 429 431 out_free: 430 432 kfree(p); 431 - return err; 433 + return ERR_PTR(err); 432 434 out_del: 433 435 kobject_put(p->holder_dir); 434 436 device_del(pdev); 435 437 out_put: 436 438 put_device(pdev); 437 439 blk_free_devt(devt); 438 - return err; 440 + return ERR_PTR(err); 439 441 } 440 442 441 443 /* Not exported, helper to add_disk(). 
*/ ··· 568 566 disk->disk_name, p, (unsigned long long) size); 569 567 size = get_capacity(disk) - from; 570 568 } 571 - res = add_partition(disk, p, from, size, state->parts[p].flags); 572 - if (res) { 573 - printk(KERN_ERR " %s: p%d could not be added: %d\n", 574 - disk->disk_name, p, -res); 569 + part = add_partition(disk, p, from, size, 570 + state->parts[p].flags); 571 + if (IS_ERR(part)) { 572 + printk(KERN_ERR " %s: p%d could not be added: %ld\n", 573 + disk->disk_name, p, -PTR_ERR(part)); 575 574 continue; 576 575 } 577 576 #ifdef CONFIG_BLK_DEV_MD 578 577 if (state->parts[p].flags & ADDPART_FLAG_RAID) 579 - md_autodetect_dev(bdev->bd_dev+p); 578 + md_autodetect_dev(part_to_dev(part)->devt); 580 579 #endif 581 580 } 582 581 kfree(state);
+1
fs/proc/proc_sysctl.c
··· 31 31 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 32 32 inode->i_flags |= S_PRIVATE; /* tell selinux to ignore this inode */ 33 33 inode->i_mode = table->mode; 34 + inode->i_uid = inode->i_gid = 0; 34 35 if (!table->child) { 35 36 inode->i_mode |= S_IFREG; 36 37 inode->i_op = &proc_sys_inode_operations;
-4
include/linux/cpuset.h
··· 74 74 return current->flags & PF_SPREAD_SLAB; 75 75 } 76 76 77 - extern void cpuset_track_online_nodes(void); 78 - 79 77 extern int current_cpuset_is_being_rebound(void); 80 78 81 79 extern void rebuild_sched_domains(void); ··· 148 150 { 149 151 return 0; 150 152 } 151 - 152 - static inline void cpuset_track_online_nodes(void) {} 153 153 154 154 static inline int current_cpuset_is_being_rebound(void) 155 155 {
+3 -1
include/linux/genhd.h
··· 522 522 523 523 extern int disk_expand_part_tbl(struct gendisk *disk, int target); 524 524 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); 525 - extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); 525 + extern struct hd_struct * __must_check add_partition(struct gendisk *disk, 526 + int partno, sector_t start, 527 + sector_t len, int flags); 526 528 extern void delete_partition(struct gendisk *, int); 527 529 extern void printk_all_partitions(void); 528 530
+2
include/linux/input.h
··· 238 238 #define KEY_KPEQUAL 117 239 239 #define KEY_KPPLUSMINUS 118 240 240 #define KEY_PAUSE 119 241 + #define KEY_SCALE 120 /* AL Compiz Scale (Expose) */ 241 242 242 243 #define KEY_KPCOMMA 121 243 244 #define KEY_HANGEUL 122 ··· 323 322 #define KEY_PAUSECD 201 324 323 #define KEY_PROG3 202 325 324 #define KEY_PROG4 203 325 + #define KEY_DASHBOARD 204 /* AL Dashboard */ 326 326 #define KEY_SUSPEND 205 327 327 #define KEY_CLOSE 206 /* AC Close */ 328 328 #define KEY_PLAY 207
+3 -2
include/linux/lockdep.h
··· 331 331 # define lock_set_subclass(l, s, i) do { } while (0) 332 332 # define lockdep_init() do { } while (0) 333 333 # define lockdep_info() do { } while (0) 334 - # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) 334 + # define lockdep_init_map(lock, name, key, sub) \ 335 + do { (void)(name); (void)(key); } while (0) 335 336 # define lockdep_set_class(lock, key) do { (void)(key); } while (0) 336 337 # define lockdep_set_class_and_name(lock, key, name) \ 337 - do { (void)(key); } while (0) 338 + do { (void)(key); (void)(name); } while (0) 338 339 #define lockdep_set_class_and_subclass(lock, key, sub) \ 339 340 do { (void)(key); } while (0) 340 341 #define lockdep_set_subclass(lock, sub) do { } while (0)
+2 -4
include/linux/net.h
··· 40 40 #define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */ 41 41 #define SYS_SENDMSG 16 /* sys_sendmsg(2) */ 42 42 #define SYS_RECVMSG 17 /* sys_recvmsg(2) */ 43 - #define SYS_PACCEPT 18 /* sys_paccept(2) */ 43 + #define SYS_ACCEPT4 18 /* sys_accept4(2) */ 44 44 45 45 typedef enum { 46 46 SS_FREE = 0, /* not allocated */ ··· 100 100 * remaining bits are used as flags. */ 101 101 #define SOCK_TYPE_MASK 0xf 102 102 103 - /* Flags for socket, socketpair, paccept */ 103 + /* Flags for socket, socketpair, accept4 */ 104 104 #define SOCK_CLOEXEC O_CLOEXEC 105 105 #ifndef SOCK_NONBLOCK 106 106 #define SOCK_NONBLOCK O_NONBLOCK ··· 223 223 extern struct socket *sockfd_lookup(int fd, int *err); 224 224 #define sockfd_put(sock) fput(sock->file) 225 225 extern int net_ratelimit(void); 226 - extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr, 227 - int __user *upeer_addrlen, int flags); 228 226 229 227 #define net_random() random32() 230 228 #define net_srandom(seed) srandom32((__force u32)seed)
+3
include/linux/ring_buffer.h
··· 120 120 u64 ring_buffer_time_stamp(int cpu); 121 121 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 122 122 123 + void tracing_on(void); 124 + void tracing_off(void); 125 + 123 126 enum ring_buffer_flags { 124 127 RB_FL_OVERWRITE = 1 << 0, 125 128 };
+1 -2
include/linux/syscalls.h
··· 410 410 asmlinkage long sys_bind(int, struct sockaddr __user *, int); 411 411 asmlinkage long sys_connect(int, struct sockaddr __user *, int); 412 412 asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); 413 - asmlinkage long sys_paccept(int, struct sockaddr __user *, int __user *, 414 - const __user sigset_t *, size_t, int); 413 + asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); 415 414 asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); 416 415 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); 417 416 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
-20
include/net/mac80211.h
··· 74 74 */ 75 75 76 76 /** 77 - * enum ieee80211_notification_type - Low level driver notification 78 - * @IEEE80211_NOTIFY_RE_ASSOC: start the re-association sequence 79 - */ 80 - enum ieee80211_notification_types { 81 - IEEE80211_NOTIFY_RE_ASSOC, 82 - }; 83 - 84 - /** 85 77 * struct ieee80211_ht_bss_info - describing BSS's HT characteristics 86 78 * 87 79 * This structure describes most essential parameters needed ··· 1788 1796 */ 1789 1797 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra, 1790 1798 u16 tid); 1791 - 1792 - /** 1793 - * ieee80211_notify_mac - low level driver notification 1794 - * @hw: pointer as obtained from ieee80211_alloc_hw(). 1795 - * @notif_type: enum ieee80211_notification_types 1796 - * 1797 - * This function must be called by low level driver to inform mac80211 of 1798 - * low level driver status change or force mac80211 to re-assoc for low 1799 - * level driver internal error that require re-assoc. 1800 - */ 1801 - void ieee80211_notify_mac(struct ieee80211_hw *hw, 1802 - enum ieee80211_notification_types notif_type); 1803 1799 1804 1800 /** 1805 1801 * ieee80211_find_sta - find a station
+1 -1
include/net/sock.h
··· 815 815 */ 816 816 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ 817 817 do { \ 818 - sk->sk_lock.owned = 0; \ 818 + sk->sk_lock.owned = 0; \ 819 819 init_waitqueue_head(&sk->sk_lock.wq); \ 820 820 spin_lock_init(&(sk)->sk_lock.slock); \ 821 821 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
+1 -1
init/Kconfig
··· 354 354 setting below. If enabled, it will also make it impossible to 355 355 schedule realtime tasks for non-root users until you allocate 356 356 realtime bandwidth for them. 357 - See Documentation/sched-rt-group.txt for more information. 357 + See Documentation/scheduler/sched-rt-group.txt for more information. 358 358 359 359 choice 360 360 depends on GROUP_SCHED
+9 -5
ipc/util.c
··· 266 266 if (ids->in_use >= size) 267 267 return -ENOSPC; 268 268 269 + spin_lock_init(&new->lock); 270 + new->deleted = 0; 271 + rcu_read_lock(); 272 + spin_lock(&new->lock); 273 + 269 274 err = idr_get_new(&ids->ipcs_idr, new, &id); 270 - if (err) 275 + if (err) { 276 + spin_unlock(&new->lock); 277 + rcu_read_unlock(); 271 278 return err; 279 + } 272 280 273 281 ids->in_use++; 274 282 ··· 288 280 ids->seq = 0; 289 281 290 282 new->id = ipc_buildid(id, new->seq); 291 - spin_lock_init(&new->lock); 292 - new->deleted = 0; 293 - rcu_read_lock(); 294 - spin_lock(&new->lock); 295 283 return id; 296 284 } 297 285
+1 -3
kernel/Makefile
··· 11 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 12 12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o 13 13 14 - CFLAGS_REMOVE_sched.o = -mno-spe 15 - 16 14 ifdef CONFIG_FUNCTION_TRACER 17 15 # Do not trace debug files and internal ftrace files 18 16 CFLAGS_REMOVE_lockdep.o = -pg ··· 19 21 CFLAGS_REMOVE_rtmutex-debug.o = -pg 20 22 CFLAGS_REMOVE_cgroup-debug.o = -pg 21 23 CFLAGS_REMOVE_sched_clock.o = -pg 22 - CFLAGS_REMOVE_sched.o = -mno-spe -pg 24 + CFLAGS_REMOVE_sched.o = -pg 23 25 endif 24 26 25 27 obj-$(CONFIG_FREEZER) += freezer.o
+14 -7
kernel/cgroup.c
··· 2039 2039 struct cgroup *cgrp; 2040 2040 struct cgroup_iter it; 2041 2041 struct task_struct *tsk; 2042 + 2042 2043 /* 2043 - * Validate dentry by checking the superblock operations 2044 + * Validate dentry by checking the superblock operations, 2045 + * and make sure it's a directory. 2044 2046 */ 2045 - if (dentry->d_sb->s_op != &cgroup_ops) 2047 + if (dentry->d_sb->s_op != &cgroup_ops || 2048 + !S_ISDIR(dentry->d_inode->i_mode)) 2046 2049 goto err; 2047 2050 2048 2051 ret = 0; ··· 2475 2472 mutex_unlock(&cgroup_mutex); 2476 2473 return -EBUSY; 2477 2474 } 2478 - 2479 - parent = cgrp->parent; 2480 - root = cgrp->root; 2481 - sb = root->sb; 2475 + mutex_unlock(&cgroup_mutex); 2482 2476 2483 2477 /* 2484 2478 * Call pre_destroy handlers of subsys. Notify subsystems ··· 2483 2483 */ 2484 2484 cgroup_call_pre_destroy(cgrp); 2485 2485 2486 - if (cgroup_has_css_refs(cgrp)) { 2486 + mutex_lock(&cgroup_mutex); 2487 + parent = cgrp->parent; 2488 + root = cgrp->root; 2489 + sb = root->sb; 2490 + 2491 + if (atomic_read(&cgrp->count) 2492 + || !list_empty(&cgrp->children) 2493 + || cgroup_has_css_refs(cgrp)) { 2487 2494 mutex_unlock(&cgroup_mutex); 2488 2495 return -EBUSY; 2489 2496 }
+24 -7
kernel/cpuset.c
··· 36 36 #include <linux/list.h> 37 37 #include <linux/mempolicy.h> 38 38 #include <linux/mm.h> 39 + #include <linux/memory.h> 39 40 #include <linux/module.h> 40 41 #include <linux/mount.h> 41 42 #include <linux/namei.h> ··· 588 587 int ndoms; /* number of sched domains in result */ 589 588 int nslot; /* next empty doms[] cpumask_t slot */ 590 589 591 - ndoms = 0; 592 590 doms = NULL; 593 591 dattr = NULL; 594 592 csa = NULL; ··· 674 674 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 675 675 */ 676 676 doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); 677 - if (!doms) { 678 - ndoms = 0; 677 + if (!doms) 679 678 goto done; 680 - } 681 679 682 680 /* 683 681 * The rest of the code, including the scheduler, can deal with ··· 729 731 730 732 done: 731 733 kfree(csa); 734 + 735 + /* 736 + * Fallback to the default domain if kmalloc() failed. 737 + * See comments in partition_sched_domains(). 738 + */ 739 + if (doms == NULL) 740 + ndoms = 1; 732 741 733 742 *domains = doms; 734 743 *attributes = dattr; ··· 2016 2011 * Call this routine anytime after node_states[N_HIGH_MEMORY] changes. 2017 2012 * See also the previous routine cpuset_track_online_cpus(). 
2018 2013 */ 2019 - void cpuset_track_online_nodes(void) 2014 + static int cpuset_track_online_nodes(struct notifier_block *self, 2015 + unsigned long action, void *arg) 2020 2016 { 2021 2017 cgroup_lock(); 2022 - top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 2023 - scan_for_empty_cpusets(&top_cpuset); 2018 + switch (action) { 2019 + case MEM_ONLINE: 2020 + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 2021 + break; 2022 + case MEM_OFFLINE: 2023 + top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 2024 + scan_for_empty_cpusets(&top_cpuset); 2025 + break; 2026 + default: 2027 + break; 2028 + } 2024 2029 cgroup_unlock(); 2030 + return NOTIFY_OK; 2025 2031 } 2026 2032 #endif 2027 2033 ··· 2048 2032 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 2049 2033 2050 2034 hotcpu_notifier(cpuset_track_online_cpus, 0); 2035 + hotplug_memory_notifier(cpuset_track_online_nodes, 10); 2051 2036 } 2052 2037 2053 2038 /**
+12 -5
kernel/kallsyms.c
··· 304 304 char *modname; 305 305 const char *name; 306 306 unsigned long offset, size; 307 - char namebuf[KSYM_NAME_LEN]; 307 + int len; 308 308 309 - name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); 309 + name = kallsyms_lookup(address, &size, &offset, &modname, buffer); 310 310 if (!name) 311 311 return sprintf(buffer, "0x%lx", address); 312 312 313 + if (name != buffer) 314 + strcpy(buffer, name); 315 + len = strlen(buffer); 316 + buffer += len; 317 + 313 318 if (modname) 314 - return sprintf(buffer, "%s+%#lx/%#lx [%s]", name, offset, 315 - size, modname); 319 + len += sprintf(buffer, "+%#lx/%#lx [%s]", 320 + offset, size, modname); 316 321 else 317 - return sprintf(buffer, "%s+%#lx/%#lx", name, offset, size); 322 + len += sprintf(buffer, "+%#lx/%#lx", offset, size); 323 + 324 + return len; 318 325 } 319 326 320 327 /* Look up a kernel symbol and print it to the kernel messages. */
+5 -2
kernel/posix-cpu-timers.c
··· 1308 1308 */ 1309 1309 static inline int fastpath_timer_check(struct task_struct *tsk) 1310 1310 { 1311 - struct signal_struct *sig = tsk->signal; 1311 + struct signal_struct *sig; 1312 1312 1313 - if (unlikely(!sig)) 1313 + /* tsk == current, ensure it is safe to use ->signal/sighand */ 1314 + if (unlikely(tsk->exit_state)) 1314 1315 return 0; 1315 1316 1316 1317 if (!task_cputime_zero(&tsk->cputime_expires)) { ··· 1324 1323 if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) 1325 1324 return 1; 1326 1325 } 1326 + 1327 + sig = tsk->signal; 1327 1328 if (!task_cputime_zero(&sig->cputime_expires)) { 1328 1329 struct task_cputime group_sample; 1329 1330
+1 -1
kernel/power/main.c
··· 174 174 * has some performance issues. The stack dump of a WARN_ON 175 175 * is more likely to get the right attention than a printk... 176 176 */ 177 - WARN_ON(msec > (TEST_SUSPEND_SECONDS * 1000)); 177 + WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); 178 178 } 179 179 180 180 #else
+1 -1
kernel/profile.c
··· 544 544 }; 545 545 546 546 #ifdef CONFIG_SMP 547 - static void __init profile_nop(void *unused) 547 + static inline void profile_nop(void *unused) 548 548 { 549 549 } 550 550
+4 -5
kernel/relay.c
··· 400 400 } 401 401 402 402 mutex_lock(&relay_channels_mutex); 403 - for_each_online_cpu(i) 403 + for_each_possible_cpu(i) 404 404 if (chan->buf[i]) 405 405 __relay_reset(chan->buf[i], 0); 406 406 mutex_unlock(&relay_channels_mutex); ··· 611 611 return chan; 612 612 613 613 free_bufs: 614 - for_each_online_cpu(i) { 615 - if (!chan->buf[i]) 616 - break; 617 - relay_close_buf(chan->buf[i]); 614 + for_each_possible_cpu(i) { 615 + if (chan->buf[i]) 616 + relay_close_buf(chan->buf[i]); 618 617 } 619 618 620 619 kref_put(&chan->kref, relay_destroy_channel);
+7 -6
kernel/sched.c
··· 7789 7789 * 7790 7790 * The passed in 'doms_new' should be kmalloc'd. This routine takes 7791 7791 * ownership of it and will kfree it when done with it. If the caller 7792 - * failed the kmalloc call, then it can pass in doms_new == NULL, 7793 - * and partition_sched_domains() will fallback to the single partition 7794 - * 'fallback_doms', it also forces the domains to be rebuilt. 7792 + * failed the kmalloc call, then it can pass in doms_new == NULL && 7793 + * ndoms_new == 1, and partition_sched_domains() will fallback to 7794 + * the single partition 'fallback_doms', it also forces the domains 7795 + * to be rebuilt. 7795 7796 * 7796 - * If doms_new==NULL it will be replaced with cpu_online_map. 7797 - * ndoms_new==0 is a special case for destroying existing domains. 7798 - * It will not create the default domain. 7797 + * If doms_new == NULL it will be replaced with cpu_online_map. 7798 + * ndoms_new == 0 is a special case for destroying existing domains, 7799 + * and it will not create the default domain. 7799 7800 * 7800 7801 * Call with hotplug lock held 7801 7802 */
+3 -2
kernel/sched_debug.c
··· 423 423 #undef __P 424 424 425 425 { 426 + unsigned int this_cpu = raw_smp_processor_id(); 426 427 u64 t0, t1; 427 428 428 - t0 = sched_clock(); 429 - t1 = sched_clock(); 429 + t0 = cpu_clock(this_cpu); 430 + t1 = cpu_clock(this_cpu); 430 431 SEQ_printf(m, "%-35s:%21Ld\n", 431 432 "clock-delta", (long long)(t1-t0)); 432 433 }
+11 -4
kernel/sched_stats.h
··· 298 298 { 299 299 struct signal_struct *sig; 300 300 301 - sig = tsk->signal; 302 - if (unlikely(!sig)) 301 + /* tsk == current, ensure it is safe to use ->signal */ 302 + if (unlikely(tsk->exit_state)) 303 303 return; 304 + 305 + sig = tsk->signal; 304 306 if (sig->cputime.totals) { 305 307 struct task_cputime *times; 306 308 ··· 327 325 { 328 326 struct signal_struct *sig; 329 327 330 - sig = tsk->signal; 331 - if (unlikely(!sig)) 328 + /* tsk == current, ensure it is safe to use ->signal */ 329 + if (unlikely(tsk->exit_state)) 332 330 return; 331 + 332 + sig = tsk->signal; 333 333 if (sig->cputime.totals) { 334 334 struct task_cputime *times; 335 335 ··· 357 353 struct signal_struct *sig; 358 354 359 355 sig = tsk->signal; 356 + /* see __exit_signal()->task_rq_unlock_wait() */ 357 + barrier(); 360 358 if (unlikely(!sig)) 361 359 return; 360 + 362 361 if (sig->cputime.totals) { 363 362 struct task_cputime *times; 364 363
+3 -2
kernel/stop_machine.c
··· 112 112 int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 113 113 { 114 114 struct work_struct *sm_work; 115 - int i; 115 + int i, ret; 116 116 117 117 /* Set up initial state. */ 118 118 mutex_lock(&lock); ··· 137 137 /* This will release the thread on our CPU. */ 138 138 put_cpu(); 139 139 flush_workqueue(stop_machine_wq); 140 + ret = active.fnret; 140 141 mutex_unlock(&lock); 141 - return active.fnret; 142 + return ret; 142 143 } 143 144 144 145 int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
+1 -1
kernel/sys_ni.c
··· 31 31 cond_syscall(sys_bind); 32 32 cond_syscall(sys_listen); 33 33 cond_syscall(sys_accept); 34 - cond_syscall(sys_paccept); 34 + cond_syscall(sys_accept4); 35 35 cond_syscall(sys_connect); 36 36 cond_syscall(sys_getsockname); 37 37 cond_syscall(sys_getpeername);
+73 -78
kernel/trace/ftrace.c
··· 185 185 }; 186 186 187 187 static int ftrace_filtered; 188 - static int tracing_on; 189 188 190 189 static LIST_HEAD(ftrace_new_addrs); 191 190 ··· 326 327 327 328 static int 328 329 __ftrace_replace_code(struct dyn_ftrace *rec, 329 - unsigned char *old, unsigned char *new, int enable) 330 + unsigned char *nop, int enable) 330 331 { 331 332 unsigned long ip, fl; 333 + unsigned char *call, *old, *new; 332 334 333 335 ip = rec->ip; 334 336 335 - if (ftrace_filtered && enable) { 336 - /* 337 - * If filtering is on: 338 - * 339 - * If this record is set to be filtered and 340 - * is enabled then do nothing. 341 - * 342 - * If this record is set to be filtered and 343 - * it is not enabled, enable it. 344 - * 345 - * If this record is not set to be filtered 346 - * and it is not enabled do nothing. 347 - * 348 - * If this record is set not to trace then 349 - * do nothing. 350 - * 351 - * If this record is set not to trace and 352 - * it is enabled then disable it. 353 - * 354 - * If this record is not set to be filtered and 355 - * it is enabled, disable it. 356 - */ 357 - 358 - fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE | 359 - FTRACE_FL_ENABLED); 360 - 361 - if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) || 362 - (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) || 363 - !fl || (fl == FTRACE_FL_NOTRACE)) 337 + /* 338 + * If this record is not to be traced and 339 + * it is not enabled then do nothing. 340 + * 341 + * If this record is not to be traced and 342 + * it is enabled then disabled it. 343 + * 344 + */ 345 + if (rec->flags & FTRACE_FL_NOTRACE) { 346 + if (rec->flags & FTRACE_FL_ENABLED) 347 + rec->flags &= ~FTRACE_FL_ENABLED; 348 + else 364 349 return 0; 365 350 351 + } else if (ftrace_filtered && enable) { 366 352 /* 367 - * If it is enabled disable it, 368 - * otherwise enable it! 
353 + * Filtering is on: 369 354 */ 370 - if (fl & FTRACE_FL_ENABLED) { 371 - /* swap new and old */ 372 - new = old; 373 - old = ftrace_call_replace(ip, FTRACE_ADDR); 355 + 356 + fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED); 357 + 358 + /* Record is filtered and enabled, do nothing */ 359 + if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) 360 + return 0; 361 + 362 + /* Record is not filtered and is not enabled do nothing */ 363 + if (!fl) 364 + return 0; 365 + 366 + /* Record is not filtered but enabled, disable it */ 367 + if (fl == FTRACE_FL_ENABLED) 374 368 rec->flags &= ~FTRACE_FL_ENABLED; 375 - } else { 376 - new = ftrace_call_replace(ip, FTRACE_ADDR); 369 + else 370 + /* Otherwise record is filtered but not enabled, enable it */ 377 371 rec->flags |= FTRACE_FL_ENABLED; 378 - } 379 372 } else { 373 + /* Disable or not filtered */ 380 374 381 375 if (enable) { 382 - /* 383 - * If this record is set not to trace and is 384 - * not enabled, do nothing. 385 - */ 386 - fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED); 387 - if (fl == FTRACE_FL_NOTRACE) 388 - return 0; 389 - 390 - new = ftrace_call_replace(ip, FTRACE_ADDR); 391 - } else 392 - old = ftrace_call_replace(ip, FTRACE_ADDR); 393 - 394 - if (enable) { 376 + /* if record is enabled, do nothing */ 395 377 if (rec->flags & FTRACE_FL_ENABLED) 396 378 return 0; 379 + 397 380 rec->flags |= FTRACE_FL_ENABLED; 381 + 398 382 } else { 383 + 384 + /* if record is not enabled do nothing */ 399 385 if (!(rec->flags & FTRACE_FL_ENABLED)) 400 386 return 0; 387 + 401 388 rec->flags &= ~FTRACE_FL_ENABLED; 402 389 } 390 + } 391 + 392 + call = ftrace_call_replace(ip, FTRACE_ADDR); 393 + 394 + if (rec->flags & FTRACE_FL_ENABLED) { 395 + old = nop; 396 + new = call; 397 + } else { 398 + old = call; 399 + new = nop; 403 400 } 404 401 405 402 return ftrace_modify_code(ip, old, new); ··· 404 409 static void ftrace_replace_code(int enable) 405 410 { 406 411 int i, failed; 407 - unsigned char *new = NULL, 
*old = NULL; 412 + unsigned char *nop = NULL; 408 413 struct dyn_ftrace *rec; 409 414 struct ftrace_page *pg; 410 415 411 - if (enable) 412 - old = ftrace_nop_replace(); 413 - else 414 - new = ftrace_nop_replace(); 416 + nop = ftrace_nop_replace(); 415 417 416 418 for (pg = ftrace_pages_start; pg; pg = pg->next) { 417 419 for (i = 0; i < pg->index; i++) { ··· 426 434 unfreeze_record(rec); 427 435 } 428 436 429 - failed = __ftrace_replace_code(rec, old, new, enable); 437 + failed = __ftrace_replace_code(rec, nop, enable); 430 438 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { 431 439 rec->flags |= FTRACE_FL_FAILED; 432 440 if ((system_state == SYSTEM_BOOTING) || ··· 498 506 { 499 507 int *command = data; 500 508 501 - if (*command & FTRACE_ENABLE_CALLS) { 509 + if (*command & FTRACE_ENABLE_CALLS) 502 510 ftrace_replace_code(1); 503 - tracing_on = 1; 504 - } else if (*command & FTRACE_DISABLE_CALLS) { 511 + else if (*command & FTRACE_DISABLE_CALLS) 505 512 ftrace_replace_code(0); 506 - tracing_on = 0; 507 - } 508 513 509 514 if (*command & FTRACE_UPDATE_TRACE_FUNC) 510 515 ftrace_update_ftrace_func(ftrace_trace_function); ··· 527 538 528 539 mutex_lock(&ftrace_start_lock); 529 540 ftrace_start++; 530 - if (ftrace_start == 1) 531 - command |= FTRACE_ENABLE_CALLS; 541 + command |= FTRACE_ENABLE_CALLS; 532 542 533 543 if (saved_ftrace_func != ftrace_trace_function) { 534 544 saved_ftrace_func = ftrace_trace_function; ··· 665 677 666 678 cnt = num_to_init / ENTRIES_PER_PAGE; 667 679 pr_info("ftrace: allocating %ld entries in %d pages\n", 668 - num_to_init, cnt); 680 + num_to_init, cnt + 1); 669 681 670 682 for (i = 0; i < cnt; i++) { 671 683 pg->next = (void *)get_zeroed_page(GFP_KERNEL); ··· 726 738 ((iter->flags & FTRACE_ITER_FAILURES) && 727 739 !(rec->flags & FTRACE_FL_FAILED)) || 728 740 741 + ((iter->flags & FTRACE_ITER_FILTER) && 742 + !(rec->flags & FTRACE_FL_FILTER)) || 743 + 729 744 ((iter->flags & FTRACE_ITER_NOTRACE) && 730 745 !(rec->flags & 
FTRACE_FL_NOTRACE))) { 731 746 rec = NULL; ··· 748 757 void *p = NULL; 749 758 loff_t l = -1; 750 759 751 - if (*pos != iter->pos) { 752 - for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l)) 753 - ; 754 - } else { 755 - l = *pos; 756 - p = t_next(m, p, &l); 757 - } 760 + if (*pos > iter->pos) 761 + *pos = iter->pos; 762 + 763 + l = *pos; 764 + p = t_next(m, p, &l); 758 765 759 766 return p; 760 767 } ··· 763 774 764 775 static int t_show(struct seq_file *m, void *v) 765 776 { 777 + struct ftrace_iterator *iter = m->private; 766 778 struct dyn_ftrace *rec = v; 767 779 char str[KSYM_SYMBOL_LEN]; 780 + int ret = 0; 768 781 769 782 if (!rec) 770 783 return 0; 771 784 772 785 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 773 786 774 - seq_printf(m, "%s\n", str); 787 + ret = seq_printf(m, "%s\n", str); 788 + if (ret < 0) { 789 + iter->pos--; 790 + iter->idx--; 791 + } 775 792 776 793 return 0; 777 794 } ··· 803 808 return -ENOMEM; 804 809 805 810 iter->pg = ftrace_pages_start; 806 - iter->pos = -1; 811 + iter->pos = 0; 807 812 808 813 ret = seq_open(file, &show_ftrace_seq_ops); 809 814 if (!ret) { ··· 890 895 891 896 if (file->f_mode & FMODE_READ) { 892 897 iter->pg = ftrace_pages_start; 893 - iter->pos = -1; 898 + iter->pos = 0; 894 899 iter->flags = enable ? FTRACE_ITER_FILTER : 895 900 FTRACE_ITER_NOTRACE; 896 901 ··· 1181 1186 1182 1187 mutex_lock(&ftrace_sysctl_lock); 1183 1188 mutex_lock(&ftrace_start_lock); 1184 - if (iter->filtered && ftrace_start && ftrace_enabled) 1189 + if (ftrace_start && ftrace_enabled) 1185 1190 ftrace_run_update_code(FTRACE_ENABLE_CALLS); 1186 1191 mutex_unlock(&ftrace_start_lock); 1187 1192 mutex_unlock(&ftrace_sysctl_lock);
+115 -1
kernel/trace/ring_buffer.c
··· 16 16 #include <linux/list.h> 17 17 #include <linux/fs.h> 18 18 19 + #include "trace.h" 20 + 21 + /* Global flag to disable all recording to ring buffers */ 22 + static int ring_buffers_off __read_mostly; 23 + 24 + /** 25 + * tracing_on - enable all tracing buffers 26 + * 27 + * This function enables all tracing buffers that may have been 28 + * disabled with tracing_off. 29 + */ 30 + void tracing_on(void) 31 + { 32 + ring_buffers_off = 0; 33 + } 34 + 35 + /** 36 + * tracing_off - turn off all tracing buffers 37 + * 38 + * This function stops all tracing buffers from recording data. 39 + * It does not disable any overhead the tracers themselves may 40 + * be causing. This function simply causes all recording to 41 + * the ring buffers to fail. 42 + */ 43 + void tracing_off(void) 44 + { 45 + ring_buffers_off = 1; 46 + } 47 + 19 48 /* Up this if you want to test the TIME_EXTENTS and normalization */ 20 49 #define DEBUG_SHIFT 0 21 50 22 51 /* FIXME!!! */ 23 52 u64 ring_buffer_time_stamp(int cpu) 24 53 { 54 + u64 time; 55 + 56 + preempt_disable_notrace(); 25 57 /* shift to debug/test normalization and TIME_EXTENTS */ 26 - return sched_clock() << DEBUG_SHIFT; 58 + time = sched_clock() << DEBUG_SHIFT; 59 + preempt_enable_notrace(); 60 + 61 + return time; 27 62 } 28 63 29 64 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) ··· 538 503 LIST_HEAD(pages); 539 504 int i, cpu; 540 505 506 + /* 507 + * Always succeed at resizing a non-existent buffer: 508 + */ 509 + if (!buffer) 510 + return size; 511 + 541 512 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 542 513 size *= BUF_PAGE_SIZE; 543 514 buffer_size = buffer->pages * BUF_PAGE_SIZE; ··· 617 576 list_del_init(&page->list); 618 577 free_buffer_page(page); 619 578 } 579 + mutex_unlock(&buffer->mutex); 620 580 return -ENOMEM; 621 581 } 622 582 ··· 1175 1133 struct ring_buffer_event *event; 1176 1134 int cpu, resched; 1177 1135 1136 + if (ring_buffers_off) 1137 + return NULL; 1138 + 1178 1139 if 
(atomic_read(&buffer->record_disabled)) 1179 1140 return NULL; 1180 1141 ··· 1293 1248 void *body; 1294 1249 int ret = -EBUSY; 1295 1250 int cpu, resched; 1251 + 1252 + if (ring_buffers_off) 1253 + return -EBUSY; 1296 1254 1297 1255 if (atomic_read(&buffer->record_disabled)) 1298 1256 return -EBUSY; ··· 2118 2070 return 0; 2119 2071 } 2120 2072 2073 + static ssize_t 2074 + rb_simple_read(struct file *filp, char __user *ubuf, 2075 + size_t cnt, loff_t *ppos) 2076 + { 2077 + int *p = filp->private_data; 2078 + char buf[64]; 2079 + int r; 2080 + 2081 + /* !ring_buffers_off == tracing_on */ 2082 + r = sprintf(buf, "%d\n", !*p); 2083 + 2084 + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2085 + } 2086 + 2087 + static ssize_t 2088 + rb_simple_write(struct file *filp, const char __user *ubuf, 2089 + size_t cnt, loff_t *ppos) 2090 + { 2091 + int *p = filp->private_data; 2092 + char buf[64]; 2093 + long val; 2094 + int ret; 2095 + 2096 + if (cnt >= sizeof(buf)) 2097 + return -EINVAL; 2098 + 2099 + if (copy_from_user(&buf, ubuf, cnt)) 2100 + return -EFAULT; 2101 + 2102 + buf[cnt] = 0; 2103 + 2104 + ret = strict_strtoul(buf, 10, &val); 2105 + if (ret < 0) 2106 + return ret; 2107 + 2108 + /* !ring_buffers_off == tracing_on */ 2109 + *p = !val; 2110 + 2111 + (*ppos)++; 2112 + 2113 + return cnt; 2114 + } 2115 + 2116 + static struct file_operations rb_simple_fops = { 2117 + .open = tracing_open_generic, 2118 + .read = rb_simple_read, 2119 + .write = rb_simple_write, 2120 + }; 2121 + 2122 + 2123 + static __init int rb_init_debugfs(void) 2124 + { 2125 + struct dentry *d_tracer; 2126 + struct dentry *entry; 2127 + 2128 + d_tracer = tracing_init_dentry(); 2129 + 2130 + entry = debugfs_create_file("tracing_on", 0644, d_tracer, 2131 + &ring_buffers_off, &rb_simple_fops); 2132 + if (!entry) 2133 + pr_warning("Could not create debugfs 'tracing_on' entry\n"); 2134 + 2135 + return 0; 2136 + } 2137 + 2138 + fs_initcall(rb_init_debugfs);
+1
kernel/trace/trace.c
··· 1936 1936 ring_buffer_read_finish(iter->buffer_iter[cpu]); 1937 1937 } 1938 1938 mutex_unlock(&trace_types_lock); 1939 + kfree(iter); 1939 1940 1940 1941 return ERR_PTR(-ENOMEM); 1941 1942 }
+1 -1
lib/scatterlist.c
··· 395 395 WARN_ON(!irqs_disabled()); 396 396 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); 397 397 } else 398 - kunmap(miter->addr); 398 + kunmap(miter->page); 399 399 400 400 miter->page = NULL; 401 401 miter->addr = NULL;
+7 -3
lib/swiotlb.c
··· 467 467 dma_addr_t dev_addr; 468 468 void *ret; 469 469 int order = get_order(size); 470 + u64 dma_mask = DMA_32BIT_MASK; 471 + 472 + if (hwdev && hwdev->coherent_dma_mask) 473 + dma_mask = hwdev->coherent_dma_mask; 470 474 471 475 ret = (void *)__get_free_pages(flags, order); 472 - if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) { 476 + if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) { 473 477 /* 474 478 * The allocated memory isn't reachable by the device. 475 479 * Fall back on swiotlb_map_single(). ··· 497 493 dev_addr = virt_to_bus(ret); 498 494 499 495 /* Confirm address can be DMA'd by device */ 500 - if (address_needs_mapping(hwdev, dev_addr, size)) { 496 + if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { 501 497 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 502 - (unsigned long long)*hwdev->dma_mask, 498 + (unsigned long long)dma_mask, 503 499 (unsigned long long)dev_addr); 504 500 505 501 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-3
mm/memory_hotplug.c
··· 22 22 #include <linux/highmem.h> 23 23 #include <linux/vmalloc.h> 24 24 #include <linux/ioport.h> 25 - #include <linux/cpuset.h> 26 25 #include <linux/delay.h> 27 26 #include <linux/migrate.h> 28 27 #include <linux/page-isolation.h> ··· 496 497 497 498 /* we online node here. we can't roll back from here. */ 498 499 node_set_online(nid); 499 - 500 - cpuset_track_online_nodes(); 501 500 502 501 if (new_pgdat) { 503 502 ret = register_one_node(nid);
+1 -4
mm/migrate.c
··· 522 522 remove_migration_ptes(page, page); 523 523 524 524 rc = mapping->a_ops->writepage(page, &wbc); 525 - if (rc < 0) 526 - /* I/O Error writing */ 527 - return -EIO; 528 525 529 526 if (rc != AOP_WRITEPAGE_ACTIVATE) 530 527 /* unlocked. Relock */ 531 528 lock_page(page); 532 529 533 - return -EAGAIN; 530 + return (rc < 0) ? -EIO : -EAGAIN; 534 531 } 535 532 536 533 /*
+1 -1
mm/mlock.c
··· 162 162 unsigned long addr = start; 163 163 struct page *pages[16]; /* 16 gives a reasonable batch */ 164 164 int nr_pages = (end - start) / PAGE_SIZE; 165 - int ret; 165 + int ret = 0; 166 166 int gup_flags = 0; 167 167 168 168 VM_BUG_ON(start & ~PAGE_MASK);
+16 -5
mm/vmalloc.c
··· 324 324 325 325 BUG_ON(size & ~PAGE_MASK); 326 326 327 - addr = ALIGN(vstart, align); 328 - 329 327 va = kmalloc_node(sizeof(struct vmap_area), 330 328 gfp_mask & GFP_RECLAIM_MASK, node); 331 329 if (unlikely(!va)) 332 330 return ERR_PTR(-ENOMEM); 333 331 334 332 retry: 333 + addr = ALIGN(vstart, align); 334 + 335 335 spin_lock(&vmap_area_lock); 336 336 /* XXX: could have a last_hole cache */ 337 337 n = vmap_area_root.rb_node; ··· 362 362 goto found; 363 363 } 364 364 365 - while (addr + size >= first->va_start && addr + size <= vend) { 365 + while (addr + size > first->va_start && addr + size <= vend) { 366 366 addr = ALIGN(first->va_end + PAGE_SIZE, align); 367 367 368 368 n = rb_next(&first->rb_node); ··· 522 522 } 523 523 524 524 /* 525 + * Kick off a purge of the outstanding lazy areas. Don't bother if somebody 526 + * is already purging. 527 + */ 528 + static void try_purge_vmap_area_lazy(void) 529 + { 530 + unsigned long start = ULONG_MAX, end = 0; 531 + 532 + __purge_vmap_area_lazy(&start, &end, 0, 0); 533 + } 534 + 535 + /* 525 536 * Kick off a purge of the outstanding lazy areas. 526 537 */ 527 538 static void purge_vmap_area_lazy(void) 528 539 { 529 540 unsigned long start = ULONG_MAX, end = 0; 530 541 531 - __purge_vmap_area_lazy(&start, &end, 0, 0); 542 + __purge_vmap_area_lazy(&start, &end, 1, 0); 532 543 } 533 544 534 545 /* ··· 550 539 va->flags |= VM_LAZY_FREE; 551 540 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); 552 541 if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages())) 553 - purge_vmap_area_lazy(); 542 + try_purge_vmap_area_lazy(); 554 543 } 555 544 556 545 static struct vmap_area *find_vmap_area(unsigned long addr)
+6 -3
mm/vmscan.c
··· 623 623 * Try to allocate it some swap space here. 624 624 */ 625 625 if (PageAnon(page) && !PageSwapCache(page)) { 626 + if (!(sc->gfp_mask & __GFP_IO)) 627 + goto keep_locked; 626 628 switch (try_to_munlock(page)) { 627 629 case SWAP_FAIL: /* shouldn't happen */ 628 630 case SWAP_AGAIN: ··· 636 634 } 637 635 if (!add_to_swap(page, GFP_ATOMIC)) 638 636 goto activate_locked; 637 + may_enter_fs = 1; 639 638 } 640 639 #endif /* CONFIG_SWAP */ 641 640 ··· 1389 1386 file_prio = 200 - sc->swappiness; 1390 1387 1391 1388 /* 1392 - * anon recent_rotated[0] 1393 - * %anon = 100 * ----------- / ----------------- * IO cost 1394 - * anon + file rotate_sum 1389 + * The amount of pressure on anon vs file pages is inversely 1390 + * proportional to the fraction of recently scanned pages on 1391 + * each list that were recently referenced and in active use. 1395 1392 */ 1396 1393 ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1); 1397 1394 ap /= zone->recent_rotated[0] + 1;
+5 -45
net/compat.c
··· 725 725 static unsigned char nas[19]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), 726 726 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), 727 727 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), 728 - AL(6)}; 728 + AL(4)}; 729 729 #undef AL 730 730 731 731 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) ··· 738 738 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 739 739 } 740 740 741 - asmlinkage long compat_sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, 742 - int __user *upeer_addrlen, 743 - const compat_sigset_t __user *sigmask, 744 - compat_size_t sigsetsize, int flags) 745 - { 746 - compat_sigset_t ss32; 747 - sigset_t ksigmask, sigsaved; 748 - int ret; 749 - 750 - if (sigmask) { 751 - if (sigsetsize != sizeof(compat_sigset_t)) 752 - return -EINVAL; 753 - if (copy_from_user(&ss32, sigmask, sizeof(ss32))) 754 - return -EFAULT; 755 - sigset_from_compat(&ksigmask, &ss32); 756 - 757 - sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP)); 758 - sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); 759 - } 760 - 761 - ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); 762 - 763 - if (ret == -ERESTARTNOHAND) { 764 - /* 765 - * Don't restore the signal mask yet. Let do_signal() deliver 766 - * the signal on the way back to userspace, before the signal 767 - * mask is restored. 
768 - */ 769 - if (sigmask) { 770 - memcpy(&current->saved_sigmask, &sigsaved, 771 - sizeof(sigsaved)); 772 - set_restore_sigmask(); 773 - } 774 - } else if (sigmask) 775 - sigprocmask(SIG_SETMASK, &sigsaved, NULL); 776 - 777 - return ret; 778 - } 779 - 780 741 asmlinkage long compat_sys_socketcall(int call, u32 __user *args) 781 742 { 782 743 int ret; 783 744 u32 a[6]; 784 745 u32 a0, a1; 785 746 786 - if (call < SYS_SOCKET || call > SYS_PACCEPT) 747 + if (call < SYS_SOCKET || call > SYS_ACCEPT4) 787 748 return -EINVAL; 788 749 if (copy_from_user(a, args, nas[call])) 789 750 return -EFAULT; ··· 765 804 ret = sys_listen(a0, a1); 766 805 break; 767 806 case SYS_ACCEPT: 768 - ret = do_accept(a0, compat_ptr(a1), compat_ptr(a[2]), 0); 807 + ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), 0); 769 808 break; 770 809 case SYS_GETSOCKNAME: 771 810 ret = sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2])); ··· 805 844 case SYS_RECVMSG: 806 845 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); 807 846 break; 808 - case SYS_PACCEPT: 809 - ret = compat_sys_paccept(a0, compat_ptr(a1), compat_ptr(a[2]), 810 - compat_ptr(a[3]), a[4], a[5]); 847 + case SYS_ACCEPT4: 848 + ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]); 811 849 break; 812 850 default: 813 851 ret = -EINVAL;
+2 -7
net/core/pktgen.c
··· 1973 1973 1974 1974 /* make sure that we don't pick a non-existing transmit queue */ 1975 1975 ntxq = pkt_dev->odev->real_num_tx_queues; 1976 - if (ntxq > num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) { 1977 - printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU " 1978 - "disabled because CPU count (%d) exceeds number " 1979 - "of tx queues (%d) on %s\n", num_online_cpus(), ntxq, 1980 - pkt_dev->odev->name); 1981 - pkt_dev->flags &= ~F_QUEUE_MAP_CPU; 1982 - } 1976 + 1983 1977 if (ntxq <= pkt_dev->queue_map_min) { 1984 1978 printk(KERN_WARNING "pktgen: WARNING: Requested " 1985 1979 "queue_map_min (zero-based) (%d) exceeds valid range " ··· 2196 2202 } 2197 2203 pkt_dev->cur_queue_map = t; 2198 2204 } 2205 + pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues; 2199 2206 } 2200 2207 2201 2208 /* Increment/randomize headers according to flags and current values
+3 -1
net/core/rtnetlink.c
··· 878 878 if (ifm->ifi_change) 879 879 flags = (flags & ifm->ifi_change) | 880 880 (dev->flags & ~ifm->ifi_change); 881 - dev_change_flags(dev, flags); 881 + err = dev_change_flags(dev, flags); 882 + if (err < 0) 883 + goto errout; 882 884 } 883 885 884 886 if (tb[IFLA_TXQLEN])
-2
net/core/scm.c
··· 75 75 if (!fpl) 76 76 return -ENOMEM; 77 77 *fplp = fpl; 78 - INIT_LIST_HEAD(&fpl->list); 79 78 fpl->count = 0; 80 79 } 81 80 fpp = &fpl->fp[fpl->count]; ··· 300 301 301 302 new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL); 302 303 if (new_fpl) { 303 - INIT_LIST_HEAD(&new_fpl->list); 304 304 for (i=fpl->count-1; i>=0; i--) 305 305 get_file(fpl->fp[i]); 306 306 memcpy(new_fpl, fpl, sizeof(*fpl));
-2
net/core/sock.c
··· 136 136 static struct lock_class_key af_family_keys[AF_MAX]; 137 137 static struct lock_class_key af_family_slock_keys[AF_MAX]; 138 138 139 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 140 139 /* 141 140 * Make lock validator output more readable. (we pre-construct these 142 141 * strings build-time, so that runtime initialization of socket ··· 186 187 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 187 188 "clock-AF_MAX" 188 189 }; 189 - #endif 190 190 191 191 /* 192 192 * sk_callback_lock locking rules are per-address-family,
+1
net/ipv4/af_inet.c
··· 1117 1117 }, 1118 1118 }, 1119 1119 .proto = sk->sk_protocol, 1120 + .flags = inet_sk_flowi_flags(sk), 1120 1121 .uli_u = { 1121 1122 .ports = { 1122 1123 .sport = inet->sport,
+9 -1
net/ipv4/ip_input.c
··· 209 209 210 210 hash = protocol & (MAX_INET_PROTOS - 1); 211 211 ipprot = rcu_dereference(inet_protos[hash]); 212 - if (ipprot != NULL && (net == &init_net || ipprot->netns_ok)) { 212 + if (ipprot != NULL) { 213 213 int ret; 214 + 215 + if (!net_eq(net, &init_net) && !ipprot->netns_ok) { 216 + if (net_ratelimit()) 217 + printk("%s: proto %d isn't netns-ready\n", 218 + __func__, protocol); 219 + kfree_skb(skb); 220 + goto out; 221 + } 214 222 215 223 if (!ipprot->no_policy) { 216 224 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+5 -4
net/ipv4/ipmr.c
··· 1945 1945 goto proc_cache_fail; 1946 1946 #endif 1947 1947 return 0; 1948 - reg_notif_fail: 1949 - kmem_cache_destroy(mrt_cachep); 1950 1948 #ifdef CONFIG_PROC_FS 1951 - proc_vif_fail: 1952 - unregister_netdevice_notifier(&ip_mr_notifier); 1953 1949 proc_cache_fail: 1954 1950 proc_net_remove(&init_net, "ip_mr_vif"); 1951 + proc_vif_fail: 1952 + unregister_netdevice_notifier(&ip_mr_notifier); 1955 1953 #endif 1954 + reg_notif_fail: 1955 + del_timer(&ipmr_expire_timer); 1956 + kmem_cache_destroy(mrt_cachep); 1956 1957 return err; 1957 1958 }
+1
net/ipv4/udp.c
··· 633 633 .saddr = saddr, 634 634 .tos = tos } }, 635 635 .proto = sk->sk_protocol, 636 + .flags = inet_sk_flowi_flags(sk), 636 637 .uli_u = { .ports = 637 638 { .sport = inet->sport, 638 639 .dport = dport } } };
+5
net/ipv6/datagram.c
··· 661 661 switch (rthdr->type) { 662 662 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 663 663 case IPV6_SRCRT_TYPE_2: 664 + if (rthdr->hdrlen != 2 || 665 + rthdr->segments_left != 1) { 666 + err = -EINVAL; 667 + goto exit_f; 668 + } 664 669 break; 665 670 #endif 666 671 default:
+2 -2
net/ipv6/ip6mr.c
··· 224 224 .open = ip6mr_vif_open, 225 225 .read = seq_read, 226 226 .llseek = seq_lseek, 227 - .release = seq_release, 227 + .release = seq_release_private, 228 228 }; 229 229 230 230 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) ··· 338 338 .open = ipmr_mfc_open, 339 339 .read = seq_read, 340 340 .llseek = seq_lseek, 341 - .release = seq_release, 341 + .release = seq_release_private, 342 342 }; 343 343 #endif 344 344
+5
net/ipv6/ipv6_sockglue.c
··· 366 366 } 367 367 368 368 /* routing header option needs extra check */ 369 + retv = -EINVAL; 369 370 if (optname == IPV6_RTHDR && opt && opt->srcrt) { 370 371 struct ipv6_rt_hdr *rthdr = opt->srcrt; 371 372 switch (rthdr->type) { 372 373 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 373 374 case IPV6_SRCRT_TYPE_2: 375 + if (rthdr->hdrlen != 2 || 376 + rthdr->segments_left != 1) 377 + goto sticky_done; 378 + 374 379 break; 375 380 #endif 376 381 default:
+3 -3
net/ipv6/proc.c
··· 132 132 133 133 static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) 134 134 { 135 - static char name[32]; 135 + char name[32]; 136 136 int i; 137 137 138 138 /* print by name -- deprecated items */ ··· 144 144 p = icmp6type2name[icmptype]; 145 145 if (!p) /* don't print un-named types here */ 146 146 continue; 147 - (void) snprintf(name, sizeof(name)-1, "Icmp6%s%s", 147 + snprintf(name, sizeof(name), "Icmp6%s%s", 148 148 i & 0x100 ? "Out" : "In", p); 149 149 seq_printf(seq, "%-32s\t%lu\n", name, 150 150 snmp_fold_field(mib, i)); ··· 157 157 val = snmp_fold_field(mib, i); 158 158 if (!val) 159 159 continue; 160 - (void) snprintf(name, sizeof(name)-1, "Icmp6%sType%u", 160 + snprintf(name, sizeof(name), "Icmp6%sType%u", 161 161 i & 0x100 ? "Out" : "In", i & 0xff); 162 162 seq_printf(seq, "%-32s\t%lu\n", name, val); 163 163 }
-22
net/mac80211/mlme.c
··· 2560 2560 ieee80211_restart_sta_timer(sdata); 2561 2561 rcu_read_unlock(); 2562 2562 } 2563 - 2564 - /* driver notification call */ 2565 - void ieee80211_notify_mac(struct ieee80211_hw *hw, 2566 - enum ieee80211_notification_types notif_type) 2567 - { 2568 - struct ieee80211_local *local = hw_to_local(hw); 2569 - struct ieee80211_sub_if_data *sdata; 2570 - 2571 - switch (notif_type) { 2572 - case IEEE80211_NOTIFY_RE_ASSOC: 2573 - rtnl_lock(); 2574 - list_for_each_entry(sdata, &local->interfaces, list) { 2575 - if (sdata->vif.type != NL80211_IFTYPE_STATION) 2576 - continue; 2577 - 2578 - ieee80211_sta_req_auth(sdata, &sdata->u.sta); 2579 - } 2580 - rtnl_unlock(); 2581 - break; 2582 - } 2583 - } 2584 - EXPORT_SYMBOL(ieee80211_notify_mac);
+26 -30
net/phonet/af_phonet.c
··· 33 33 #include <net/phonet/phonet.h> 34 34 #include <net/phonet/pn_dev.h> 35 35 36 - static struct net_proto_family phonet_proto_family; 37 - static struct phonet_protocol *phonet_proto_get(int protocol); 38 - static inline void phonet_proto_put(struct phonet_protocol *pp); 36 + /* Transport protocol registration */ 37 + static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; 38 + static DEFINE_SPINLOCK(proto_tab_lock); 39 + 40 + static struct phonet_protocol *phonet_proto_get(int protocol) 41 + { 42 + struct phonet_protocol *pp; 43 + 44 + if (protocol >= PHONET_NPROTO) 45 + return NULL; 46 + 47 + spin_lock(&proto_tab_lock); 48 + pp = proto_tab[protocol]; 49 + if (pp && !try_module_get(pp->prot->owner)) 50 + pp = NULL; 51 + spin_unlock(&proto_tab_lock); 52 + 53 + return pp; 54 + } 55 + 56 + static inline void phonet_proto_put(struct phonet_protocol *pp) 57 + { 58 + module_put(pp->prot->owner); 59 + } 39 60 40 61 /* protocol family functions */ 41 62 ··· 165 144 struct phonethdr *ph; 166 145 int err; 167 146 168 - if (skb->len + 2 > 0xffff) { 169 - /* Phonet length field would overflow */ 147 + if (skb->len + 2 > 0xffff /* Phonet length field limit */ || 148 + skb->len + sizeof(struct phonethdr) > dev->mtu) { 170 149 err = -EMSGSIZE; 171 150 goto drop; 172 151 } ··· 396 375 .func = phonet_rcv, 397 376 }; 398 377 399 - /* Transport protocol registration */ 400 - static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; 401 - static DEFINE_SPINLOCK(proto_tab_lock); 402 - 403 378 int __init_or_module phonet_proto_register(int protocol, 404 379 struct phonet_protocol *pp) 405 380 { ··· 428 411 proto_unregister(pp->prot); 429 412 } 430 413 EXPORT_SYMBOL(phonet_proto_unregister); 431 - 432 - static struct phonet_protocol *phonet_proto_get(int protocol) 433 - { 434 - struct phonet_protocol *pp; 435 - 436 - if (protocol >= PHONET_NPROTO) 437 - return NULL; 438 - 439 - spin_lock(&proto_tab_lock); 440 - pp = proto_tab[protocol]; 441 - if (pp && 
!try_module_get(pp->prot->owner)) 442 - pp = NULL; 443 - spin_unlock(&proto_tab_lock); 444 - 445 - return pp; 446 - } 447 - 448 - static inline void phonet_proto_put(struct phonet_protocol *pp) 449 - { 450 - module_put(pp->prot->owner); 451 - } 452 414 453 415 /* Module registration */ 454 416 static int __init phonet_init(void)
+2
net/sched/sch_api.c
··· 417 417 struct nlattr *nest; 418 418 419 419 nest = nla_nest_start(skb, TCA_STAB); 420 + if (nest == NULL) 421 + goto nla_put_failure; 420 422 NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); 421 423 nla_nest_end(skb, nest); 422 424
+6 -1
net/sched/sch_generic.c
··· 270 270 void netif_carrier_on(struct net_device *dev) 271 271 { 272 272 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 273 + if (dev->reg_state == NETREG_UNINITIALIZED) 274 + return; 273 275 linkwatch_fire_event(dev); 274 276 if (netif_running(dev)) 275 277 __netdev_watchdog_up(dev); ··· 287 285 */ 288 286 void netif_carrier_off(struct net_device *dev) 289 287 { 290 - if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) 288 + if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 289 + if (dev->reg_state == NETREG_UNINITIALIZED) 290 + return; 291 291 linkwatch_fire_event(dev); 292 + } 292 293 } 293 294 EXPORT_SYMBOL(netif_carrier_off); 294 295
+10 -70
net/socket.c
··· 1426 1426 * clean when we restucture accept also. 1427 1427 */ 1428 1428 1429 - long do_accept(int fd, struct sockaddr __user *upeer_sockaddr, 1430 - int __user *upeer_addrlen, int flags) 1429 + asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, 1430 + int __user *upeer_addrlen, int flags) 1431 1431 { 1432 1432 struct socket *sock, *newsock; 1433 1433 struct file *newfile; ··· 1510 1510 goto out_put; 1511 1511 } 1512 1512 1513 - #if 0 1514 - #ifdef HAVE_SET_RESTORE_SIGMASK 1515 - asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, 1516 - int __user *upeer_addrlen, 1517 - const sigset_t __user *sigmask, 1518 - size_t sigsetsize, int flags) 1519 - { 1520 - sigset_t ksigmask, sigsaved; 1521 - int ret; 1522 - 1523 - if (sigmask) { 1524 - /* XXX: Don't preclude handling different sized sigset_t's. */ 1525 - if (sigsetsize != sizeof(sigset_t)) 1526 - return -EINVAL; 1527 - if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) 1528 - return -EFAULT; 1529 - 1530 - sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP)); 1531 - sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); 1532 - } 1533 - 1534 - ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); 1535 - 1536 - if (ret < 0 && signal_pending(current)) { 1537 - /* 1538 - * Don't restore the signal mask yet. Let do_signal() deliver 1539 - * the signal on the way back to userspace, before the signal 1540 - * mask is restored. 
1541 - */ 1542 - if (sigmask) { 1543 - memcpy(&current->saved_sigmask, &sigsaved, 1544 - sizeof(sigsaved)); 1545 - set_restore_sigmask(); 1546 - } 1547 - } else if (sigmask) 1548 - sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1549 - 1550 - return ret; 1551 - } 1552 - #else 1553 - asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, 1554 - int __user *upeer_addrlen, 1555 - const sigset_t __user *sigmask, 1556 - size_t sigsetsize, int flags) 1557 - { 1558 - /* The platform does not support restoring the signal mask in the 1559 - * return path. So we do not allow using paccept() with a signal 1560 - * mask. */ 1561 - if (sigmask) 1562 - return -EINVAL; 1563 - 1564 - return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); 1565 - } 1566 - #endif 1567 - #endif 1568 - 1569 1513 asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, 1570 1514 int __user *upeer_addrlen) 1571 1515 { 1572 - return do_accept(fd, upeer_sockaddr, upeer_addrlen, 0); 1516 + return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); 1573 1517 } 1574 1518 1575 1519 /* ··· 2040 2096 AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), 2041 2097 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), 2042 2098 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), 2043 - AL(6) 2099 + AL(4) 2044 2100 }; 2045 2101 2046 2102 #undef AL ··· 2059 2115 unsigned long a0, a1; 2060 2116 int err; 2061 2117 2062 - if (call < 1 || call > SYS_PACCEPT) 2118 + if (call < 1 || call > SYS_ACCEPT4) 2063 2119 return -EINVAL; 2064 2120 2065 2121 /* copy_from_user should be SMP safe. 
*/ ··· 2087 2143 err = sys_listen(a0, a1); 2088 2144 break; 2089 2145 case SYS_ACCEPT: 2090 - err = 2091 - do_accept(a0, (struct sockaddr __user *)a1, 2092 - (int __user *)a[2], 0); 2146 + err = sys_accept4(a0, (struct sockaddr __user *)a1, 2147 + (int __user *)a[2], 0); 2093 2148 break; 2094 2149 case SYS_GETSOCKNAME: 2095 2150 err = ··· 2135 2192 case SYS_RECVMSG: 2136 2193 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); 2137 2194 break; 2138 - case SYS_PACCEPT: 2139 - err = 2140 - sys_paccept(a0, (struct sockaddr __user *)a1, 2141 - (int __user *)a[2], 2142 - (const sigset_t __user *) a[3], 2143 - a[4], a[5]); 2195 + case SYS_ACCEPT4: 2196 + err = sys_accept4(a0, (struct sockaddr __user *)a1, 2197 + (int __user *)a[2], a[3]); 2144 2198 break; 2145 2199 default: 2146 2200 err = -EINVAL;
+18 -2
net/sunrpc/auth_generic.c
··· 133 133 generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags) 134 134 { 135 135 struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base); 136 + int i; 136 137 137 138 if (gcred->acred.uid != acred->uid || 138 139 gcred->acred.gid != acred->gid || 139 - gcred->acred.group_info != acred->group_info || 140 140 gcred->acred.machine_cred != acred->machine_cred) 141 - return 0; 141 + goto out_nomatch; 142 + 143 + /* Optimisation in the case where pointers are identical... */ 144 + if (gcred->acred.group_info == acred->group_info) 145 + goto out_match; 146 + 147 + /* Slow path... */ 148 + if (gcred->acred.group_info->ngroups != acred->group_info->ngroups) 149 + goto out_nomatch; 150 + for (i = 0; i < gcred->acred.group_info->ngroups; i++) { 151 + if (GROUP_AT(gcred->acred.group_info, i) != 152 + GROUP_AT(acred->group_info, i)) 153 + goto out_nomatch; 154 + } 155 + out_match: 142 156 return 1; 157 + out_nomatch: 158 + return 0; 143 159 } 144 160 145 161 void __init rpc_init_generic_auth(void)
+8
sound/pci/hda/hda_beep.c
··· 37 37 container_of(work, struct hda_beep, beep_work); 38 38 struct hda_codec *codec = beep->codec; 39 39 40 + if (!beep->enabled) 41 + return; 42 + 40 43 /* generate tone */ 41 44 snd_hda_codec_write_cache(codec, beep->nid, 0, 42 45 AC_VERB_SET_BEEP_CONTROL, beep->tone); ··· 88 85 snprintf(beep->phys, sizeof(beep->phys), 89 86 "card%d/codec#%d/beep0", codec->bus->card->number, codec->addr); 90 87 input_dev = input_allocate_device(); 88 + if (!input_dev) { 89 + kfree(beep); 90 + return -ENOMEM; 91 + } 91 92 92 93 /* setup digital beep device */ 93 94 input_dev->name = "HDA Digital PCBeep"; ··· 122 115 beep->nid = nid; 123 116 beep->dev = input_dev; 124 117 beep->codec = codec; 118 + beep->enabled = 1; 125 119 codec->beep = beep; 126 120 127 121 INIT_WORK(&beep->beep_work, &snd_hda_generate_beep);
+1
sound/pci/hda/hda_beep.h
··· 31 31 char phys[32]; 32 32 int tone; 33 33 int nid; 34 + int enabled; 34 35 struct work_struct beep_work; /* scheduled task for beep event */ 35 36 }; 36 37
+81 -18
sound/pci/hda/patch_sigmatel.c
··· 36 36 #include "hda_beep.h" 37 37 38 38 #define NUM_CONTROL_ALLOC 32 39 + 40 + #define STAC_VREF_EVENT 0x00 41 + #define STAC_INSERT_EVENT 0x10 39 42 #define STAC_PWR_EVENT 0x20 40 43 #define STAC_HP_EVENT 0x30 41 - #define STAC_VREF_EVENT 0x40 42 44 43 45 enum { 44 46 STAC_REF, ··· 1688 1686 /* SigmaTel reference board */ 1689 1687 SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, 1690 1688 "DFI LanParty", STAC_92HD71BXX_REF), 1689 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f2, 1690 + "HP dv5", STAC_HP_M4), 1691 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30f4, 1692 + "HP dv7", STAC_HP_M4), 1691 1693 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361a, 1692 1694 "unknown HP", STAC_HP_M4), 1693 1695 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233, ··· 2593 2587 }; 2594 2588 2595 2589 /* add dynamic controls */ 2596 - static int stac92xx_add_control_idx(struct sigmatel_spec *spec, int type, 2597 - int idx, const char *name, unsigned long val) 2590 + static int stac92xx_add_control_temp(struct sigmatel_spec *spec, 2591 + struct snd_kcontrol_new *ktemp, 2592 + int idx, const char *name, 2593 + unsigned long val) 2598 2594 { 2599 2595 struct snd_kcontrol_new *knew; 2600 2596 ··· 2615 2607 } 2616 2608 2617 2609 knew = &spec->kctl_alloc[spec->num_kctl_used]; 2618 - *knew = stac92xx_control_templates[type]; 2610 + *knew = *ktemp; 2619 2611 knew->index = idx; 2620 2612 knew->name = kstrdup(name, GFP_KERNEL); 2621 - if (! 
knew->name) 2613 + if (!knew->name) 2622 2614 return -ENOMEM; 2623 2615 knew->private_value = val; 2624 2616 spec->num_kctl_used++; 2625 2617 return 0; 2626 2618 } 2627 2619 2620 + static inline int stac92xx_add_control_idx(struct sigmatel_spec *spec, 2621 + int type, int idx, const char *name, 2622 + unsigned long val) 2623 + { 2624 + return stac92xx_add_control_temp(spec, 2625 + &stac92xx_control_templates[type], 2626 + idx, name, val); 2627 + } 2628 + 2628 2629 2629 2630 /* add dynamic controls */ 2630 - static int stac92xx_add_control(struct sigmatel_spec *spec, int type, 2631 - const char *name, unsigned long val) 2631 + static inline int stac92xx_add_control(struct sigmatel_spec *spec, int type, 2632 + const char *name, unsigned long val) 2632 2633 { 2633 2634 return stac92xx_add_control_idx(spec, type, 0, name, val); 2634 2635 } ··· 3079 3062 return 0; 3080 3063 } 3081 3064 3065 + #ifdef CONFIG_SND_HDA_INPUT_BEEP 3066 + #define stac92xx_dig_beep_switch_info snd_ctl_boolean_mono_info 3067 + 3068 + static int stac92xx_dig_beep_switch_get(struct snd_kcontrol *kcontrol, 3069 + struct snd_ctl_elem_value *ucontrol) 3070 + { 3071 + struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 3072 + ucontrol->value.integer.value[0] = codec->beep->enabled; 3073 + return 0; 3074 + } 3075 + 3076 + static int stac92xx_dig_beep_switch_put(struct snd_kcontrol *kcontrol, 3077 + struct snd_ctl_elem_value *ucontrol) 3078 + { 3079 + struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 3080 + int enabled = !!ucontrol->value.integer.value[0]; 3081 + if (codec->beep->enabled != enabled) { 3082 + codec->beep->enabled = enabled; 3083 + return 1; 3084 + } 3085 + return 0; 3086 + } 3087 + 3088 + static struct snd_kcontrol_new stac92xx_dig_beep_ctrl = { 3089 + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 3090 + .info = stac92xx_dig_beep_switch_info, 3091 + .get = stac92xx_dig_beep_switch_get, 3092 + .put = stac92xx_dig_beep_switch_put, 3093 + }; 3094 + 3095 + static int 
stac92xx_beep_switch_ctl(struct hda_codec *codec) 3096 + { 3097 + return stac92xx_add_control_temp(codec->spec, &stac92xx_dig_beep_ctrl, 3098 + 0, "PC Beep Playback Switch", 0); 3099 + } 3100 + #endif 3101 + 3082 3102 static int stac92xx_auto_create_mux_input_ctls(struct hda_codec *codec) 3083 3103 { 3084 3104 struct sigmatel_spec *spec = codec->spec; ··· 3422 3368 #ifdef CONFIG_SND_HDA_INPUT_BEEP 3423 3369 if (spec->digbeep_nid > 0) { 3424 3370 hda_nid_t nid = spec->digbeep_nid; 3371 + unsigned int caps; 3425 3372 3426 3373 err = stac92xx_auto_create_beep_ctls(codec, nid); 3427 3374 if (err < 0) ··· 3430 3375 err = snd_hda_attach_beep_device(codec, nid); 3431 3376 if (err < 0) 3432 3377 return err; 3378 + /* if no beep switch is available, make its own one */ 3379 + caps = query_amp_caps(codec, nid, HDA_OUTPUT); 3380 + if (codec->beep && 3381 + !((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT)) { 3382 + err = stac92xx_beep_switch_ctl(codec); 3383 + if (err < 0) 3384 + return err; 3385 + } 3433 3386 } 3434 3387 #endif 3435 3388 ··· 4482 4419 stac92xx_set_config_regs(codec); 4483 4420 } 4484 4421 4422 + if (spec->board_config > STAC_92HD71BXX_REF) { 4423 + /* GPIO0 = EAPD */ 4424 + spec->gpio_mask = 0x01; 4425 + spec->gpio_dir = 0x01; 4426 + spec->gpio_data = 0x01; 4427 + } 4428 + 4485 4429 switch (codec->vendor_id) { 4486 4430 case 0x111d76b6: /* 4 Port without Analog Mixer */ 4487 4431 case 0x111d76b7: ··· 4499 4429 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; 4500 4430 break; 4501 4431 case 0x111d7608: /* 5 Port with Analog Mixer */ 4502 - switch (codec->subsystem_id) { 4503 - case 0x103c361a: 4432 + switch (spec->board_config) { 4433 + case STAC_HP_M4: 4504 4434 /* Enable VREF power saving on GPIO1 detect */ 4505 - snd_hda_codec_write(codec, codec->afg, 0, 4435 + snd_hda_codec_write_cache(codec, codec->afg, 0, 4506 4436 AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x02); 4507 4437 snd_hda_codec_write_cache(codec, codec->afg, 0, 4508 4438 
AC_VERB_SET_UNSOLICITED_ENABLE, ··· 4547 4477 4548 4478 spec->aloopback_mask = 0x50; 4549 4479 spec->aloopback_shift = 0; 4550 - 4551 - if (spec->board_config > STAC_92HD71BXX_REF) { 4552 - /* GPIO0 = EAPD */ 4553 - spec->gpio_mask = 0x01; 4554 - spec->gpio_dir = 0x01; 4555 - spec->gpio_data = 0x01; 4556 - } 4557 4480 4558 4481 spec->powerdown_adcs = 1; 4559 4482 spec->digbeep_nid = 0x26; ··· 4895 4832 stac92xx_set_config_reg(codec, 0x20, 0x1c410030); 4896 4833 4897 4834 /* Enable unsol response for GPIO4/Dock HP connection */ 4898 - snd_hda_codec_write(codec, codec->afg, 0, 4835 + snd_hda_codec_write_cache(codec, codec->afg, 0, 4899 4836 AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x10); 4900 4837 snd_hda_codec_write_cache(codec, codec->afg, 0, 4901 4838 AC_VERB_SET_UNSOLICITED_ENABLE,
+4 -1
sound/pci/pcxhr/pcxhr.c
··· 1229 1229 return -ENOMEM; 1230 1230 } 1231 1231 1232 - if (snd_BUG_ON(pci_id->driver_data >= PCI_ID_LAST)) 1232 + if (snd_BUG_ON(pci_id->driver_data >= PCI_ID_LAST)) { 1233 + kfree(mgr); 1234 + pci_disable_device(pci); 1233 1235 return -ENODEV; 1236 + } 1234 1237 card_name = pcxhr_board_params[pci_id->driver_data].board_name; 1235 1238 mgr->playback_chips = pcxhr_board_params[pci_id->driver_data].playback_chips; 1236 1239 mgr->capture_chips = pcxhr_board_params[pci_id->driver_data].capture_chips;