Merge branch 'linux-2.6' into merge

+172 -246
+8 -3
arch/powerpc/mm/hash_utils_64.c
··· 351 mmu_vmalloc_psize = MMU_PAGE_64K; 352 if (mmu_linear_psize == MMU_PAGE_4K) 353 mmu_linear_psize = MMU_PAGE_64K; 354 - if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) 355 - mmu_io_psize = MMU_PAGE_64K; 356 - else 357 mmu_ci_restrictions = 1; 358 } 359 #endif /* CONFIG_PPC_64K_PAGES */
··· 351 mmu_vmalloc_psize = MMU_PAGE_64K; 352 if (mmu_linear_psize == MMU_PAGE_4K) 353 mmu_linear_psize = MMU_PAGE_64K; 354 + if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) { 355 + /* 356 + * Don't use 64k pages for ioremap on pSeries, since 357 + * that would stop us accessing the HEA ethernet. 358 + */ 359 + if (!machine_is(pseries)) 360 + mmu_io_psize = MMU_PAGE_64K; 361 + } else 362 mmu_ci_restrictions = 1; 363 } 364 #endif /* CONFIG_PPC_64K_PAGES */
+6 -2
arch/powerpc/sysdev/bestcomm/bestcomm.c
··· 52 int i, tasknum = -1; 53 struct bcom_task *tsk; 54 55 /* Get and reserve a task num */ 56 spin_lock(&bcom_eng->lock); 57 ··· 488 } 489 490 static struct of_device_id mpc52xx_bcom_of_match[] = { 491 - { .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", }, 492 - { .type = "dma-controller", .compatible = "mpc5200-bestcomm", }, 493 {}, 494 }; 495
··· 52 int i, tasknum = -1; 53 struct bcom_task *tsk; 54 55 + /* Don't try to do anything if bestcomm init failed */ 56 + if (!bcom_eng) 57 + return NULL; 58 + 59 /* Get and reserve a task num */ 60 spin_lock(&bcom_eng->lock); 61 ··· 484 } 485 486 static struct of_device_id mpc52xx_bcom_of_match[] = { 487 + { .compatible = "fsl,mpc5200-bestcomm", }, 488 + { .compatible = "mpc5200-bestcomm", }, 489 {}, 490 }; 491
+1 -1
arch/powerpc/sysdev/ipic.c
··· 906 { 907 int rc; 908 909 - if (!primary_ipic->regs) 910 return -ENODEV; 911 printk(KERN_DEBUG "Registering ipic with sysfs...\n"); 912
··· 906 { 907 int rc; 908 909 + if (!primary_ipic || !primary_ipic->regs) 910 return -ENODEV; 911 printk(KERN_DEBUG "Registering ipic with sysfs...\n"); 912
+1 -2
arch/sparc64/kernel/ds.c
··· 972 LIST_HEAD(todo); 973 974 spin_lock_irqsave(&ds_lock, flags); 975 - list_splice(&ds_work_list, &todo); 976 - INIT_LIST_HEAD(&ds_work_list); 977 spin_unlock_irqrestore(&ds_lock, flags); 978 979 list_for_each_entry_safe(qp, tmp, &todo, list) {
··· 972 LIST_HEAD(todo); 973 974 spin_lock_irqsave(&ds_lock, flags); 975 + list_splice_init(&ds_work_list, &todo); 976 spin_unlock_irqrestore(&ds_lock, flags); 977 978 list_for_each_entry_safe(qp, tmp, &todo, list) {
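Note: list_splice_init() moves the entries and re-initializes the source list head in one call, which is why the separate INIT_LIST_HEAD() disappears above. A minimal kernel-style sketch of the same drain-under-lock pattern, with illustrative names (work_item, drain_work) that are not from ds.c:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct work_item {
		struct list_head list;
		/* payload ... */
	};

	static LIST_HEAD(work_list);
	static DEFINE_SPINLOCK(work_lock);

	static void drain_work(void)
	{
		struct work_item *wi, *tmp;
		unsigned long flags;
		LIST_HEAD(todo);

		spin_lock_irqsave(&work_lock, flags);
		/* moves all entries onto 'todo' and leaves 'work_list' empty */
		list_splice_init(&work_list, &todo);
		spin_unlock_irqrestore(&work_lock, flags);

		list_for_each_entry_safe(wi, tmp, &todo, list) {
			list_del(&wi->list);
			/* process wi outside the lock */
		}
	}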
+6 -2
arch/sparc64/kernel/head.S
··· 288 /* Leave arg2 as-is, prom_mmu_ihandle_cache */ 289 mov -1, %l3 290 stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) 291 - sethi %hi(8 * 1024 * 1024), %l3 292 - stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB) 293 sethi %hi(KERNBASE), %l3 294 stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) 295 stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
··· 288 /* Leave arg2 as-is, prom_mmu_ihandle_cache */ 289 mov -1, %l3 290 stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) 291 + /* 4MB align the kernel image size. */ 292 + set (_end - KERNBASE), %l3 293 + set ((4 * 1024 * 1024) - 1), %l4 294 + add %l3, %l4, %l3 295 + andn %l3, %l4, %l3 296 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB) 297 sethi %hi(KERNBASE), %l3 298 stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) 299 stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
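Note: the new assembly passes the kernel image size rounded up to a 4MB multiple instead of a hard-coded 8MB. The add/andn pair is the usual power-of-two round-up; a C equivalent of that arithmetic (roundup_4mb is an illustrative name, not from the kernel) might look like:

	#include <stdio.h>

	#define FOUR_MB (4UL * 1024 * 1024)

	static unsigned long roundup_4mb(unsigned long size)
	{
		/* same effect as: add %l3, %l4, %l3 ; andn %l3, %l4, %l3 with %l4 = 4MB - 1 */
		return (size + (FOUR_MB - 1)) & ~(FOUR_MB - 1);
	}

	int main(void)
	{
		printf("%lu\n", roundup_4mb(5UL * 1024 * 1024));	/* 8388608, i.e. 8MB */
		return 0;
	}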
-3
arch/sparc64/kernel/process.c
··· 731 current_thread_info()->xfsr[0] = 0; 732 current_thread_info()->fpsaved[0] = 0; 733 regs->tstate &= ~TSTATE_PEF; 734 - task_lock(current); 735 - current->ptrace &= ~PT_DTRACE; 736 - task_unlock(current); 737 } 738 out: 739 return error;
··· 731 current_thread_info()->xfsr[0] = 0; 732 current_thread_info()->fpsaved[0] = 0; 733 regs->tstate &= ~TSTATE_PEF; 734 } 735 out: 736 return error;
+9 -8
arch/sparc64/kernel/smp.c
··· 284 { 285 extern unsigned long sparc64_ttable_tl0; 286 extern unsigned long kern_locked_tte_data; 287 - extern int bigkernel; 288 struct hvtramp_descr *hdesc; 289 unsigned long trampoline_ra; 290 struct trap_per_cpu *tb; 291 u64 tte_vaddr, tte_data; 292 unsigned long hv_err; 293 294 - hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL); 295 if (!hdesc) { 296 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " 297 "hvtramp_descr.\n"); ··· 302 } 303 304 hdesc->cpu = cpu; 305 - hdesc->num_mappings = (bigkernel ? 2 : 1); 306 307 tb = &trap_block[cpu]; 308 tb->hdesc = hdesc; ··· 315 tte_vaddr = (unsigned long) KERNBASE; 316 tte_data = kern_locked_tte_data; 317 318 - hdesc->maps[0].vaddr = tte_vaddr; 319 - hdesc->maps[0].tte = tte_data; 320 - if (bigkernel) { 321 tte_vaddr += 0x400000; 322 tte_data += 0x400000; 323 - hdesc->maps[1].vaddr = tte_vaddr; 324 - hdesc->maps[1].tte = tte_data; 325 } 326 327 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
··· 284 { 285 extern unsigned long sparc64_ttable_tl0; 286 extern unsigned long kern_locked_tte_data; 287 struct hvtramp_descr *hdesc; 288 unsigned long trampoline_ra; 289 struct trap_per_cpu *tb; 290 u64 tte_vaddr, tte_data; 291 unsigned long hv_err; 292 + int i; 293 294 + hdesc = kzalloc(sizeof(*hdesc) + 295 + (sizeof(struct hvtramp_mapping) * 296 + num_kernel_image_mappings - 1), 297 + GFP_KERNEL); 298 if (!hdesc) { 299 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " 300 "hvtramp_descr.\n"); ··· 299 } 300 301 hdesc->cpu = cpu; 302 + hdesc->num_mappings = num_kernel_image_mappings; 303 304 tb = &trap_block[cpu]; 305 tb->hdesc = hdesc; ··· 312 tte_vaddr = (unsigned long) KERNBASE; 313 tte_data = kern_locked_tte_data; 314 315 + for (i = 0; i < hdesc->num_mappings; i++) { 316 + hdesc->maps[i].vaddr = tte_vaddr; 317 + hdesc->maps[i].tte = tte_data; 318 tte_vaddr += 0x400000; 319 tte_data += 0x400000; 320 } 321 322 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
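Note: hvtramp_descr now carries a variable number of trailing hvtramp_mapping slots (see the include/asm-sparc64/hvtramp.h hunk below), so kzalloc() sizes the allocation as the base structure plus num_kernel_image_mappings - 1 extra elements. A sketch of that one-element-array sizing idiom, with illustrative type names; today this would be a flexible array member, but the one-element array was the style of the time:

	#include <linux/slab.h>

	struct mapping {
		unsigned long vaddr;
		unsigned long tte;
	};

	struct descr {
		unsigned int num_mappings;
		struct mapping maps[1];		/* really num_mappings entries */
	};

	static struct descr *alloc_descr(int n)
	{
		struct descr *d;

		/* the base struct already holds one element, so add n - 1 more */
		d = kzalloc(sizeof(*d) + sizeof(struct mapping) * (n - 1), GFP_KERNEL);
		if (d)
			d->num_mappings = n;
		return d;
	}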
-3
arch/sparc64/kernel/sys_sparc32.c
··· 678 current_thread_info()->xfsr[0] = 0; 679 current_thread_info()->fpsaved[0] = 0; 680 regs->tstate &= ~TSTATE_PEF; 681 - task_lock(current); 682 - current->ptrace &= ~PT_DTRACE; 683 - task_unlock(current); 684 } 685 out: 686 return error;
··· 678 current_thread_info()->xfsr[0] = 0; 679 current_thread_info()->fpsaved[0] = 0; 680 regs->tstate &= ~TSTATE_PEF; 681 } 682 out: 683 return error;
+65 -125
arch/sparc64/kernel/trampoline.S
··· 105 wr %g2, 0, %tick_cmpr 106 107 /* Call OBP by hand to lock KERNBASE into i/d tlbs. 108 - * We lock 2 consequetive entries if we are 'bigkernel'. 109 */ 110 sethi %hi(prom_entry_lock), %g2 111 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 ··· 119 add %l2, -(192 + 128), %sp 120 flushw 121 122 sethi %hi(call_method), %g2 123 or %g2, %lo(call_method), %g2 124 stx %g2, [%sp + 2047 + 128 + 0x00] ··· 155 sethi %hi(prom_mmu_ihandle_cache), %g2 156 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 157 stx %g2, [%sp + 2047 + 128 + 0x20] 158 - sethi %hi(KERNBASE), %g2 159 - stx %g2, [%sp + 2047 + 128 + 0x28] 160 - sethi %hi(kern_locked_tte_data), %g2 161 - ldx [%g2 + %lo(kern_locked_tte_data)], %g2 162 - stx %g2, [%sp + 2047 + 128 + 0x30] 163 164 - mov 15, %g2 165 - BRANCH_IF_ANY_CHEETAH(g1,g5,1f) 166 167 - mov 63, %g2 168 - 1: 169 stx %g2, [%sp + 2047 + 128 + 0x38] 170 sethi %hi(p1275buf), %g2 171 or %g2, %lo(p1275buf), %g2 172 ldx [%g2 + 0x08], %o1 173 call %o1 174 add %sp, (2047 + 128), %o0 175 176 - sethi %hi(bigkernel), %g2 177 - lduw [%g2 + %lo(bigkernel)], %g2 178 - brz,pt %g2, do_dtlb 179 - nop 180 - 181 - sethi %hi(call_method), %g2 182 - or %g2, %lo(call_method), %g2 183 - stx %g2, [%sp + 2047 + 128 + 0x00] 184 - mov 5, %g2 185 - stx %g2, [%sp + 2047 + 128 + 0x08] 186 - mov 1, %g2 187 - stx %g2, [%sp + 2047 + 128 + 0x10] 188 - sethi %hi(itlb_load), %g2 189 - or %g2, %lo(itlb_load), %g2 190 - stx %g2, [%sp + 2047 + 128 + 0x18] 191 - sethi %hi(prom_mmu_ihandle_cache), %g2 192 - lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 193 - stx %g2, [%sp + 2047 + 128 + 0x20] 194 - sethi %hi(KERNBASE + 0x400000), %g2 195 - stx %g2, [%sp + 2047 + 128 + 0x28] 196 - sethi %hi(kern_locked_tte_data), %g2 197 - ldx [%g2 + %lo(kern_locked_tte_data)], %g2 198 - sethi %hi(0x400000), %g1 199 - add %g2, %g1, %g2 200 - stx %g2, [%sp + 2047 + 128 + 0x30] 201 - 202 - mov 14, %g2 203 - BRANCH_IF_ANY_CHEETAH(g1,g5,1f) 204 - 205 - mov 62, %g2 206 - 1: 207 - stx %g2, [%sp + 2047 + 128 + 0x38] 208 - sethi %hi(p1275buf), %g2 209 - or %g2, %lo(p1275buf), %g2 210 - ldx [%g2 + 0x08], %o1 211 - call %o1 212 - add %sp, (2047 + 128), %o0 213 - 214 - do_dtlb: 215 sethi %hi(call_method), %g2 216 or %g2, %lo(call_method), %g2 217 stx %g2, [%sp + 2047 + 128 + 0x00] ··· 188 sethi %hi(prom_mmu_ihandle_cache), %g2 189 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 190 stx %g2, [%sp + 2047 + 128 + 0x20] 191 - sethi %hi(KERNBASE), %g2 192 - stx %g2, [%sp + 2047 + 128 + 0x28] 193 - sethi %hi(kern_locked_tte_data), %g2 194 - ldx [%g2 + %lo(kern_locked_tte_data)], %g2 195 - stx %g2, [%sp + 2047 + 128 + 0x30] 196 197 - mov 15, %g2 198 - BRANCH_IF_ANY_CHEETAH(g1,g5,1f) 199 200 - mov 63, %g2 201 - 1: 202 203 stx %g2, [%sp + 2047 + 128 + 0x38] 204 sethi %hi(p1275buf), %g2 205 or %g2, %lo(p1275buf), %g2 206 ldx [%g2 + 0x08], %o1 207 call %o1 208 add %sp, (2047 + 128), %o0 209 210 - sethi %hi(bigkernel), %g2 211 - lduw [%g2 + %lo(bigkernel)], %g2 212 - brz,pt %g2, do_unlock 213 nop 214 215 - sethi %hi(call_method), %g2 216 - or %g2, %lo(call_method), %g2 217 - stx %g2, [%sp + 2047 + 128 + 0x00] 218 - mov 5, %g2 219 - stx %g2, [%sp + 2047 + 128 + 0x08] 220 - mov 1, %g2 221 - stx %g2, [%sp + 2047 + 128 + 0x10] 222 - sethi %hi(dtlb_load), %g2 223 - or %g2, %lo(dtlb_load), %g2 224 - stx %g2, [%sp + 2047 + 128 + 0x18] 225 - sethi %hi(prom_mmu_ihandle_cache), %g2 226 - lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 227 - stx %g2, [%sp + 2047 + 128 + 0x20] 228 - sethi %hi(KERNBASE + 0x400000), %g2 229 - stx %g2, [%sp + 2047 + 128 + 0x28] 230 - sethi %hi(kern_locked_tte_data), %g2
231 - ldx [%g2 + %lo(kern_locked_tte_data)], %g2 232 - sethi %hi(0x400000), %g1 233 - add %g2, %g1, %g2 234 - stx %g2, [%sp + 2047 + 128 + 0x30] 235 - 236 - mov 14, %g2 237 - BRANCH_IF_ANY_CHEETAH(g1,g5,1f) 238 - 239 - mov 62, %g2 240 - 1: 241 - 242 - stx %g2, [%sp + 2047 + 128 + 0x38] 243 - sethi %hi(p1275buf), %g2 244 - or %g2, %lo(p1275buf), %g2 245 - ldx [%g2 + 0x08], %o1 246 - call %o1 247 - add %sp, (2047 + 128), %o0 248 - 249 - do_unlock: 250 sethi %hi(prom_entry_lock), %g2 251 stb %g0, [%g2 + %lo(prom_entry_lock)] 252 membar #StoreStore | #StoreLoad ··· 220 nop 221 222 niagara_lock_tlb: 223 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 224 - sethi %hi(KERNBASE), %o0 225 clr %o1 226 - sethi %hi(kern_locked_tte_data), %o2 227 - ldx [%o2 + %lo(kern_locked_tte_data)], %o2 228 mov HV_MMU_IMMU, %o3 229 ta HV_FAST_TRAP 230 231 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 232 - sethi %hi(KERNBASE), %o0 233 clr %o1 234 - sethi %hi(kern_locked_tte_data), %o2 235 - ldx [%o2 + %lo(kern_locked_tte_data)], %o2 236 mov HV_MMU_DMMU, %o3 237 ta HV_FAST_TRAP 238 239 - sethi %hi(bigkernel), %g2 240 - lduw [%g2 + %lo(bigkernel)], %g2 241 - brz,pt %g2, after_lock_tlb 242 nop 243 - 244 - mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 245 - sethi %hi(KERNBASE + 0x400000), %o0 246 - clr %o1 247 - sethi %hi(kern_locked_tte_data), %o2 248 - ldx [%o2 + %lo(kern_locked_tte_data)], %o2 249 - sethi %hi(0x400000), %o3 250 - add %o2, %o3, %o2 251 - mov HV_MMU_IMMU, %o3 252 - ta HV_FAST_TRAP 253 - 254 - mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 255 - sethi %hi(KERNBASE + 0x400000), %o0 256 - clr %o1 257 - sethi %hi(kern_locked_tte_data), %o2 258 - ldx [%o2 + %lo(kern_locked_tte_data)], %o2 259 - sethi %hi(0x400000), %o3 260 - add %o2, %o3, %o2 261 - mov HV_MMU_DMMU, %o3 262 - ta HV_FAST_TRAP 263 264 after_lock_tlb: 265 wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
··· 105 wr %g2, 0, %tick_cmpr 106 107 /* Call OBP by hand to lock KERNBASE into i/d tlbs. 108 + * We lock 'num_kernel_image_mappings' consequetive entries. 109 */ 110 sethi %hi(prom_entry_lock), %g2 111 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 ··· 119 add %l2, -(192 + 128), %sp 120 flushw 121 122 + /* Setup the loop variables: 123 + * %l3: VADDR base 124 + * %l4: TTE base 125 + * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings' 126 + * %l6: Number of TTE entries to map 127 + * %l7: Highest TTE entry number, we count down 128 + */ 129 + sethi %hi(KERNBASE), %l3 130 + sethi %hi(kern_locked_tte_data), %l4 131 + ldx [%l4 + %lo(kern_locked_tte_data)], %l4 132 + clr %l5 133 + sethi %hi(num_kernel_image_mappings), %l6 134 + lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 135 + add %l6, 1, %l6 136 + 137 + mov 15, %l7 138 + BRANCH_IF_ANY_CHEETAH(g1,g5,2f) 139 + 140 + mov 63, %l7 141 + 2: 142 + 143 + 3: 144 + /* Lock into I-MMU */ 145 sethi %hi(call_method), %g2 146 or %g2, %lo(call_method), %g2 147 stx %g2, [%sp + 2047 + 128 + 0x00] ··· 132 sethi %hi(prom_mmu_ihandle_cache), %g2 133 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 134 stx %g2, [%sp + 2047 + 128 + 0x20] 135 136 + /* Each TTE maps 4MB, convert index to offset. */ 137 + sllx %l5, 22, %g1 138 139 + add %l3, %g1, %g2 140 + stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR 141 + add %l4, %g1, %g2 142 + stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE 143 + 144 + /* TTE index is highest minus loop index. */ 145 + sub %l7, %l5, %g2 146 stx %g2, [%sp + 2047 + 128 + 0x38] 147 + 148 sethi %hi(p1275buf), %g2 149 or %g2, %lo(p1275buf), %g2 150 ldx [%g2 + 0x08], %o1 151 call %o1 152 add %sp, (2047 + 128), %o0 153 154 + /* Lock into D-MMU */ 155 sethi %hi(call_method), %g2 156 or %g2, %lo(call_method), %g2 157 stx %g2, [%sp + 2047 + 128 + 0x00] ··· 202 sethi %hi(prom_mmu_ihandle_cache), %g2 203 lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 204 stx %g2, [%sp + 2047 + 128 + 0x20] 205 206 + /* Each TTE maps 4MB, convert index to offset. */ 207 + sllx %l5, 22, %g1 208 209 + add %l3, %g1, %g2 210 + stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR 211 + add %l4, %g1, %g2 212 + stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE 213 214 + /* TTE index is highest minus loop index. */ 215 + sub %l7, %l5, %g2 216 stx %g2, [%sp + 2047 + 128 + 0x38] 217 + 218 sethi %hi(p1275buf), %g2 219 or %g2, %lo(p1275buf), %g2 220 ldx [%g2 + 0x08], %o1 221 call %o1 222 add %sp, (2047 + 128), %o0 223 224 + add %l5, 1, %l5 225 + cmp %l5, %l6 226 + bne,pt %xcc, 3b 227 nop 228 229 sethi %hi(prom_entry_lock), %g2 230 stb %g0, [%g2 + %lo(prom_entry_lock)] 231 membar #StoreStore | #StoreLoad ··· 269 nop 270 271 niagara_lock_tlb: 272 + sethi %hi(KERNBASE), %l3 273 + sethi %hi(kern_locked_tte_data), %l4 274 + ldx [%l4 + %lo(kern_locked_tte_data)], %l4 275 + clr %l5 276 + sethi %hi(num_kernel_image_mappings), %l6 277 + lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 278 + add %l6, 1, %l6 279 + 280 + 1: 281 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 282 + sllx %l5, 22, %g2 283 + add %l3, %g2, %o0 284 clr %o1 285 + add %l4, %g2, %o2 286 mov HV_MMU_IMMU, %o3 287 ta HV_FAST_TRAP 288 289 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 290 + sllx %l5, 22, %g2 291 + add %l3, %g2, %o0 292 clr %o1 293 + add %l4, %g2, %o2 294 mov HV_MMU_DMMU, %o3 295 ta HV_FAST_TRAP 296 297 + add %l5, 1, %l5 298 + cmp %l5, %l6 299 + bne,pt %xcc, 1b 300 nop 301 302 after_lock_tlb: 303 wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
+14 -24
arch/sparc64/mm/init.c
··· 166 unsigned long sparc64_kern_pri_nuc_bits __read_mostly; 167 unsigned long sparc64_kern_sec_context __read_mostly; 168 169 - int bigkernel = 0; 170 171 #ifdef CONFIG_DEBUG_DCFLUSH 172 atomic_t dcpage_flushes = ATOMIC_INIT(0); ··· 572 static void __init remap_kernel(void) 573 { 574 unsigned long phys_page, tte_vaddr, tte_data; 575 - int tlb_ent = sparc64_highest_locked_tlbent(); 576 577 tte_vaddr = (unsigned long) KERNBASE; 578 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; ··· 582 583 /* Now lock us into the TLBs via Hypervisor or OBP. */ 584 if (tlb_type == hypervisor) { 585 - hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); 586 - hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); 587 - if (bigkernel) { 588 - tte_vaddr += 0x400000; 589 - tte_data += 0x400000; 590 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); 591 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); 592 } 593 } else { 594 - prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); 595 - prom_itlb_load(tlb_ent, tte_data, tte_vaddr); 596 - if (bigkernel) { 597 - tlb_ent -= 1; 598 - prom_dtlb_load(tlb_ent, 599 - tte_data + 0x400000, 600 - tte_vaddr + 0x400000); 601 - prom_itlb_load(tlb_ent, 602 - tte_data + 0x400000, 603 - tte_vaddr + 0x400000); 604 } 605 - sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; 606 } 607 if (tlb_type == cheetah_plus) { 608 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | ··· 1345 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1346 1347 real_end = (unsigned long)_end; 1348 - if ((real_end > ((unsigned long)KERNBASE + 0x400000))) 1349 - bigkernel = 1; 1350 - if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { 1351 - prom_printf("paging_init: Kernel > 8MB, too large.\n"); 1352 - prom_halt(); 1353 - } 1354 1355 /* Set kernel pgd to upper alias so physical page computations 1356 * work.
··· 166 unsigned long sparc64_kern_pri_nuc_bits __read_mostly; 167 unsigned long sparc64_kern_sec_context __read_mostly; 168 169 + int num_kernel_image_mappings; 170 171 #ifdef CONFIG_DEBUG_DCFLUSH 172 atomic_t dcpage_flushes = ATOMIC_INIT(0); ··· 572 static void __init remap_kernel(void) 573 { 574 unsigned long phys_page, tte_vaddr, tte_data; 575 + int i, tlb_ent = sparc64_highest_locked_tlbent(); 576 577 tte_vaddr = (unsigned long) KERNBASE; 578 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; ··· 582 583 /* Now lock us into the TLBs via Hypervisor or OBP. */ 584 if (tlb_type == hypervisor) { 585 + for (i = 0; i < num_kernel_image_mappings; i++) { 586 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); 587 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); 588 + tte_vaddr += 0x400000; 589 + tte_data += 0x400000; 590 } 591 } else { 592 + for (i = 0; i < num_kernel_image_mappings; i++) { 593 + prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); 594 + prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); 595 + tte_vaddr += 0x400000; 596 + tte_data += 0x400000; 597 } 598 + sparc64_highest_unlocked_tlb_ent = tlb_ent - i; 599 } 600 if (tlb_type == cheetah_plus) { 601 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | ··· 1352 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); 1353 1354 real_end = (unsigned long)_end; 1355 + num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); 1356 + printk("Kernel: Using %d locked TLB entries for main kernel image.\n", 1357 + num_kernel_image_mappings); 1358 1359 /* Set kernel pgd to upper alias so physical page computations 1360 * work.
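Note: the fixed 8MB limit is gone; the number of locked TLB entries is now derived from the image size, one entry per 4MB (1 << 22 bytes), rounded up. DIV_ROUND_UP() is the standard helper from <linux/kernel.h>; a small standalone example of the computation:

	#include <stdio.h>

	/* as defined in <linux/kernel.h> */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long image_size = 5UL << 20;	/* say the image is 5MB */

		/* one locked 4MB TLB entry per started 4MB of image */
		printf("%lu\n", DIV_ROUND_UP(image_size, 1UL << 22));	/* prints 2 */
		return 0;
	}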
+3 -3
arch/x86/mm/ioremap.c
··· 106 * have to convert them into an offset in a page-aligned mapping, but the 107 * caller shouldn't need to know that small detail. 108 */ 109 - static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, 110 enum ioremap_mode mode) 111 { 112 unsigned long pfn, offset, last_addr, vaddr; ··· 193 * 194 * Must be freed with iounmap. 195 */ 196 - void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) 197 { 198 return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); 199 } 200 EXPORT_SYMBOL(ioremap_nocache); 201 202 - void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size) 203 { 204 return __ioremap(phys_addr, size, IOR_MODE_CACHED); 205 }
··· 106 * have to convert them into an offset in a page-aligned mapping, but the 107 * caller shouldn't need to know that small detail. 108 */ 109 + static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, 110 enum ioremap_mode mode) 111 { 112 unsigned long pfn, offset, last_addr, vaddr; ··· 193 * 194 * Must be freed with iounmap. 195 */ 196 + void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) 197 { 198 return __ioremap(phys_addr, size, IOR_MODE_UNCACHED); 199 } 200 EXPORT_SYMBOL(ioremap_nocache); 201 202 + void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) 203 { 204 return __ioremap(phys_addr, size, IOR_MODE_CACHED); 205 }
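Note: resource_size_t tracks phys_addr_t, so on a 32-bit kernel with PAE it is 64 bits wide while unsigned long is only 32; the old prototypes would silently truncate a BAR located above 4GB. A tiny user-space illustration of the truncation (the BAR value here is made up):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t bar = 0x100000000ULL;		/* a hypothetical BAR just above 4GB */
		uint32_t narrow = (uint32_t)bar;	/* what a 32-bit unsigned long would keep */

		printf("full 0x%" PRIx64 " -> truncated 0x%" PRIx32 "\n", bar, narrow);
		return 0;
	}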
+1 -1
drivers/connector/cn_queue.c
··· 146 147 dev->nls = nls; 148 149 - dev->cn_queue = create_workqueue(dev->name); 150 if (!dev->cn_queue) { 151 kfree(dev); 152 return NULL;
··· 146 147 dev->nls = nls; 148 149 + dev->cn_queue = create_singlethread_workqueue(dev->name); 150 if (!dev->cn_queue) { 151 kfree(dev); 152 return NULL;
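Note: the connector only queues ordered callback work, so a single-threaded workqueue is enough and avoids spawning one worker thread per CPU. A minimal sketch of the single-threaded workqueue lifecycle, assuming a hypothetical driver (my_wq, my_work_fn and friends are illustrative names, not from cn_queue.c):

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;
	static struct work_struct my_work;

	static void my_work_fn(struct work_struct *work)
	{
		/* runs in process context on the single worker thread */
	}

	static int my_init(void)
	{
		my_wq = create_singlethread_workqueue("my_wq");
		if (!my_wq)
			return -ENOMEM;

		INIT_WORK(&my_work, my_work_fn);
		queue_work(my_wq, &my_work);
		return 0;
	}

	static void my_exit(void)
	{
		flush_workqueue(my_wq);
		destroy_workqueue(my_wq);
	}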
+2 -34
drivers/net/bnx2x.c
··· 63 #include "bnx2x.h" 64 #include "bnx2x_init.h" 65 66 - #define DRV_MODULE_VERSION "1.40.22" 67 - #define DRV_MODULE_RELDATE "2007/11/27" 68 #define BNX2X_BC_VER 0x040200 69 70 /* Time in jiffies before concluding the transmitter is hung. */ ··· 8007 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 8008 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 8009 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 8010 - 8011 - switch (cmd->port) { 8012 - case PORT_TP: 8013 - if (!(bp->supported & SUPPORTED_TP)) { 8014 - DP(NETIF_MSG_LINK, "TP not supported\n"); 8015 - return -EINVAL; 8016 - } 8017 - 8018 - if (bp->phy_flags & PHY_XGXS_FLAG) { 8019 - bnx2x_link_reset(bp); 8020 - bnx2x_link_settings_supported(bp, SWITCH_CFG_1G); 8021 - bnx2x_phy_deassert(bp); 8022 - } 8023 - break; 8024 - 8025 - case PORT_FIBRE: 8026 - if (!(bp->supported & SUPPORTED_FIBRE)) { 8027 - DP(NETIF_MSG_LINK, "FIBRE not supported\n"); 8028 - return -EINVAL; 8029 - } 8030 - 8031 - if (!(bp->phy_flags & PHY_XGXS_FLAG)) { 8032 - bnx2x_link_reset(bp); 8033 - bnx2x_link_settings_supported(bp, SWITCH_CFG_10G); 8034 - bnx2x_phy_deassert(bp); 8035 - } 8036 - break; 8037 - 8038 - default: 8039 - DP(NETIF_MSG_LINK, "Unknown port type\n"); 8040 - return -EINVAL; 8041 - } 8042 8043 if (cmd->autoneg == AUTONEG_ENABLE) { 8044 if (!(bp->supported & SUPPORTED_Autoneg)) {
··· 63 #include "bnx2x.h" 64 #include "bnx2x_init.h" 65 66 + #define DRV_MODULE_VERSION "1.42.3" 67 + #define DRV_MODULE_RELDATE "2008/3/9" 68 #define BNX2X_BC_VER 0x040200 69 70 /* Time in jiffies before concluding the transmitter is hung. */ ··· 8007 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 8008 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 8009 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 8010 8011 if (cmd->autoneg == AUTONEG_ENABLE) { 8012 if (!(bp->supported & SUPPORTED_Autoneg)) {
+2 -1
drivers/net/fec_mpc52xx_phy.c
··· 109 int irq = irq_of_parse_and_map(child, 0); 110 if (irq != NO_IRQ) { 111 const u32 *id = of_get_property(child, "reg", NULL); 112 - bus->irq[*id] = irq; 113 } 114 } 115
··· 109 int irq = irq_of_parse_and_map(child, 0); 110 if (irq != NO_IRQ) { 111 const u32 *id = of_get_property(child, "reg", NULL); 112 + if (id) 113 + bus->irq[*id] = irq; 114 } 115 } 116
+1 -1
drivers/net/sungem.c
··· 912 * rx ring - must call napi_disable(), which 913 * schedule_timeout()'s if polling is already disabled. 914 */ 915 - work_done += gem_rx(gp, budget); 916 917 if (work_done >= budget) 918 return work_done;
··· 912 * rx ring - must call napi_disable(), which 913 * schedule_timeout()'s if polling is already disabled. 914 */ 915 + work_done += gem_rx(gp, budget - work_done); 916 917 if (work_done >= budget) 918 return work_done;
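Note: on any pass after the first, the RX routine must be limited to what remains of the NAPI budget, otherwise the poll function can report more completed work than it was granted. A sketch of the accounting, where example_rx() is a hypothetical stand-in for the driver's RX routine:

	static int example_rx(int limit);	/* hypothetical: processes up to 'limit' packets */

	static int example_poll(int budget)
	{
		int work_done = 0;

		work_done += example_rx(budget);	/* the first pass may use it all */
		if (work_done >= budget)
			return work_done;

		/* any later pass must subtract what was already consumed */
		work_done += example_rx(budget - work_done);
		return work_done;
	}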
+1 -1
include/asm-sparc64/hvtramp.h
··· 16 __u64 fault_info_va; 17 __u64 fault_info_pa; 18 __u64 thread_reg; 19 - struct hvtramp_mapping maps[2]; 20 }; 21 22 extern void hv_cpu_startup(unsigned long hvdescr_pa);
··· 16 __u64 fault_info_va; 17 __u64 fault_info_pa; 18 __u64 thread_reg; 19 + struct hvtramp_mapping maps[1]; 20 }; 21 22 extern void hv_cpu_startup(unsigned long hvdescr_pa);
+2
include/asm-sparc64/spitfire.h
··· 63 SPITFIRE_HIGHEST_LOCKED_TLBENT : \ 64 CHEETAH_HIGHEST_LOCKED_TLBENT) 65 66 /* The data cache is write through, so this just invalidates the 67 * specified line. 68 */
··· 63 SPITFIRE_HIGHEST_LOCKED_TLBENT : \ 64 CHEETAH_HIGHEST_LOCKED_TLBENT) 65 66 + extern int num_kernel_image_mappings; 67 + 68 /* The data cache is write through, so this just invalidates the 69 * specified line. 70 */
+3 -3
include/asm-x86/io_32.h
··· 114 * If the area you are trying to map is a PCI BAR you should have a 115 * look at pci_iomap(). 116 */ 117 - extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); 118 - extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); 119 120 /* 121 * The default ioremap() behavior is non-cached: 122 */ 123 - static inline void __iomem *ioremap(unsigned long offset, unsigned long size) 124 { 125 return ioremap_nocache(offset, size); 126 }
··· 114 * If the area you are trying to map is a PCI BAR you should have a 115 * look at pci_iomap(). 116 */ 117 + extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 118 + extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 119 120 /* 121 * The default ioremap() behavior is non-cached: 122 */ 123 + static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) 124 { 125 return ioremap_nocache(offset, size); 126 }
+3 -3
include/asm-x86/io_64.h
··· 158 * it's useful if some control registers are in such an area and write combining 159 * or read caching is not desirable: 160 */ 161 - extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); 162 - extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); 163 164 /* 165 * The default ioremap() behavior is non-cached: 166 */ 167 - static inline void __iomem *ioremap(unsigned long offset, unsigned long size) 168 { 169 return ioremap_nocache(offset, size); 170 }
··· 158 * it's useful if some control registers are in such an area and write combining 159 * or read caching is not desirable: 160 */ 161 + extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); 162 + extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 163 164 /* 165 * The default ioremap() behavior is non-cached: 166 */ 167 + static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) 168 { 169 return ioremap_nocache(offset, size); 170 }
+1 -1
include/net/sctp/sctp.h
··· 389 390 #else /* #ifdef defined(CONFIG_IPV6) */ 391 392 - static inline void sctp_v6_pf_init(void) { return 0; } 393 static inline void sctp_v6_pf_exit(void) { return; } 394 static inline int sctp_v6_protosw_init(void) { return 0; } 395 static inline void sctp_v6_protosw_exit(void) { return; }
··· 389 390 #else /* #ifdef defined(CONFIG_IPV6) */ 391 392 + static inline void sctp_v6_pf_init(void) { return; } 393 static inline void sctp_v6_pf_exit(void) { return; } 394 static inline int sctp_v6_protosw_init(void) { return 0; } 395 static inline void sctp_v6_protosw_exit(void) { return; }
+4
kernel/time/timekeeping.c
··· 191 192 tick_clock_notify(); 193 194 printk(KERN_INFO "Time: %s clocksource has been installed.\n", 195 clock->name); 196 } 197 #else 198 static inline void change_clocksource(void) { }
··· 191 192 tick_clock_notify(); 193 194 + /* 195 + * We're holding xtime lock and waking up klogd would deadlock 196 + * us on enqueue. So no printing! 197 printk(KERN_INFO "Time: %s clocksource has been installed.\n", 198 clock->name); 199 + */ 200 } 201 #else 202 static inline void change_clocksource(void) { }
+1 -1
lib/iomap.c
··· 256 * */ 257 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) 258 { 259 - unsigned long start = pci_resource_start(dev, bar); 260 unsigned long len = pci_resource_len(dev, bar); 261 unsigned long flags = pci_resource_flags(dev, bar); 262
··· 256 * */ 257 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) 258 { 259 + resource_size_t start = pci_resource_start(dev, bar); 260 unsigned long len = pci_resource_len(dev, bar); 261 unsigned long flags = pci_resource_flags(dev, bar); 262
-2
net/9p/trans_fd.c
··· 861 862 static void p9_mux_flush_cb(struct p9_req *freq, void *a) 863 { 864 - p9_conn_req_callback cb; 865 int tag; 866 struct p9_conn *m; 867 struct p9_req *req, *rreq, *rptr; ··· 871 freq->tcall->params.tflush.oldtag); 872 873 spin_lock(&m->lock); 874 - cb = NULL; 875 tag = freq->tcall->params.tflush.oldtag; 876 req = NULL; 877 list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
··· 861 862 static void p9_mux_flush_cb(struct p9_req *freq, void *a) 863 { 864 int tag; 865 struct p9_conn *m; 866 struct p9_req *req, *rreq, *rptr; ··· 872 freq->tcall->params.tflush.oldtag); 873 874 spin_lock(&m->lock); 875 tag = freq->tcall->params.tflush.oldtag; 876 req = NULL; 877 list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+16 -3
net/atm/clip.c
··· 947 }; 948 #endif 949 950 static int __init atm_clip_init(void) 951 { 952 neigh_table_init_no_netlink(&clip_tbl); ··· 965 struct proc_dir_entry *p; 966 967 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); 968 } 969 #endif 970 971 return 0; 972 } 973 974 - static void __exit atm_clip_exit(void) 975 { 976 struct net_device *dev, *next; 977 - 978 - remove_proc_entry("arp", atm_proc_root); 979 980 unregister_inetaddr_notifier(&clip_inet_notifier); 981 unregister_netdevice_notifier(&clip_dev_notifier); ··· 1009 neigh_table_clear(&clip_tbl); 1010 1011 clip_tbl_hook = NULL; 1012 } 1013 1014 module_init(atm_clip_init);
··· 947 }; 948 #endif 949 950 + static void atm_clip_exit_noproc(void); 951 + 952 static int __init atm_clip_init(void) 953 { 954 neigh_table_init_no_netlink(&clip_tbl); ··· 963 struct proc_dir_entry *p; 964 965 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); 966 + if (!p) { 967 + printk(KERN_ERR "Unable to initialize " 968 + "/proc/net/atm/arp\n"); 969 + atm_clip_exit_noproc(); 970 + return -ENOMEM; 971 + } 972 } 973 #endif 974 975 return 0; 976 } 977 978 + static void atm_clip_exit_noproc(void) 979 { 980 struct net_device *dev, *next; 981 982 unregister_inetaddr_notifier(&clip_inet_notifier); 983 unregister_netdevice_notifier(&clip_dev_notifier); ··· 1003 neigh_table_clear(&clip_tbl); 1004 1005 clip_tbl_hook = NULL; 1006 + } 1007 + 1008 + static void __exit atm_clip_exit(void) 1009 + { 1010 + remove_proc_entry("arp", atm_proc_root); 1011 + 1012 + atm_clip_exit_noproc(); 1013 } 1014 1015 module_init(atm_clip_init);
+4
net/atm/lec.c
··· 1250 struct proc_dir_entry *p; 1251 1252 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); 1253 #endif 1254 1255 register_atm_ioctl(&lane_ioctl_ops);
··· 1250 struct proc_dir_entry *p; 1251 1252 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); 1253 + if (!p) { 1254 + printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n"); 1255 + return -ENOMEM; 1256 + } 1257 #endif 1258 1259 register_atm_ioctl(&lane_ioctl_ops);
+5 -2
net/ipv4/fib_trie.c
··· 177 return rcu_dereference(ret); 178 } 179 180 static inline void node_set_parent(struct node *node, struct tnode *ptr) 181 { 182 - rcu_assign_pointer(node->parent, 183 - (unsigned long)ptr | NODE_TYPE(node)); 184 } 185 186 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
··· 177 return rcu_dereference(ret); 178 } 179 180 + /* Same as rcu_assign_pointer 181 + * but that macro() assumes that value is a pointer. 182 + */ 183 static inline void node_set_parent(struct node *node, struct tnode *ptr) 184 { 185 + smp_wmb(); 186 + node->parent = (unsigned long)ptr | NODE_TYPE(node); 187 } 188 189 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
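Note: rcu_assign_pointer() expects a pointer-typed lvalue, but node->parent is an unsigned long carrying a pointer plus a low type tag, so the update is open-coded as smp_wmb() followed by a plain store; readers pair with it through rcu_dereference(), as in the accessor just above the hunk. A sketch of the publish side, with illustrative names:

	#include <linux/rcupdate.h>

	struct tagged_node {
		unsigned long parent;	/* pointer value | low type-tag bit */
	};

	static void publish_parent(struct tagged_node *n, void *parent, unsigned long tag)
	{
		/* make prior initialization of the published structure visible first */
		smp_wmb();
		n->parent = (unsigned long)parent | tag;
	}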
+1 -1
net/ipv4/ip_fragment.c
··· 568 569 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); 570 571 - net = skb->dev->nd_net; 572 /* Start by cleaning up the memory. */ 573 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) 574 ip_evictor(net);
··· 568 569 IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); 570 571 + net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net; 572 /* Start by cleaning up the memory. */ 573 if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) 574 ip_evictor(net);
+2 -2
net/ipv4/tcp.c
··· 735 if (!(psize -= copy)) 736 goto out; 737 738 - if (skb->len < mss_now || (flags & MSG_OOB)) 739 continue; 740 741 if (forced_push(tp)) { ··· 981 if ((seglen -= copy) == 0 && iovlen == 0) 982 goto out; 983 984 - if (skb->len < mss_now || (flags & MSG_OOB)) 985 continue; 986 987 if (forced_push(tp)) {
··· 735 if (!(psize -= copy)) 736 goto out; 737 738 + if (skb->len < size_goal || (flags & MSG_OOB)) 739 continue; 740 741 if (forced_push(tp)) { ··· 981 if ((seglen -= copy) == 0 && iovlen == 0) 982 goto out; 983 984 + if (skb->len < size_goal || (flags & MSG_OOB)) 985 continue; 986 987 if (forced_push(tp)) {
-2
net/ipv6/ndisc.c
··· 1420 u8 *opt; 1421 int rd_len; 1422 int err; 1423 - int hlen; 1424 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1425 1426 dev = skb->dev; ··· 1490 return; 1491 } 1492 1493 - hlen = 0; 1494 1495 skb_reserve(buff, LL_RESERVED_SPACE(dev)); 1496 ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
··· 1420 u8 *opt; 1421 int rd_len; 1422 int err; 1423 u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; 1424 1425 dev = skb->dev; ··· 1491 return; 1492 } 1493 1494 1495 skb_reserve(buff, LL_RESERVED_SPACE(dev)); 1496 ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
+7 -6
net/sched/sch_htb.c
··· 711 */ 712 static psched_time_t htb_do_events(struct htb_sched *q, int level) 713 { 714 - int i; 715 - 716 - for (i = 0; i < 500; i++) { 717 struct htb_class *cl; 718 long diff; 719 struct rb_node *p = rb_first(&q->wait_pq[level]); ··· 733 if (cl->cmode != HTB_CAN_SEND) 734 htb_add_to_wait_tree(q, cl, diff); 735 } 736 - if (net_ratelimit()) 737 - printk(KERN_WARNING "htb: too many events !\n"); 738 - return q->now + PSCHED_TICKS_PER_SEC / 10; 739 } 740 741 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
··· 711 */ 712 static psched_time_t htb_do_events(struct htb_sched *q, int level) 713 { 714 + /* don't run for longer than 2 jiffies; 2 is used instead of 715 + 1 to simplify things when jiffy is going to be incremented 716 + too soon */ 717 + unsigned long stop_at = jiffies + 2; 718 + while (time_before(jiffies, stop_at)) { 719 struct htb_class *cl; 720 long diff; 721 struct rb_node *p = rb_first(&q->wait_pq[level]); ··· 731 if (cl->cmode != HTB_CAN_SEND) 732 htb_add_to_wait_tree(q, cl, diff); 733 } 734 + /* too much load - let's continue on next jiffie */ 735 + return q->now + PSCHED_TICKS_PER_SEC / HZ; 736 } 737 738 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
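Note: the event loop is now bounded by elapsed time rather than a fixed 500-iteration cap; time_before() compares jiffies values correctly across wraparound. The general shape of the pattern, where process_one_event() is a hypothetical stand-in for the per-event body:

	#include <linux/jiffies.h>
	#include <linux/types.h>

	extern bool process_one_event(void);	/* hypothetical: returns false when nothing is due */

	static void run_events_bounded(void)
	{
		/* run for at most ~2 jiffies, then give the CPU back */
		unsigned long stop_at = jiffies + 2;

		while (time_before(jiffies, stop_at)) {
			if (!process_one_event())
				break;
		}
	}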
+3 -4
net/socket.c
··· 909 if (!dlci_ioctl_hook) 910 request_module("dlci"); 911 912 - if (dlci_ioctl_hook) { 913 - mutex_lock(&dlci_ioctl_mutex); 914 err = dlci_ioctl_hook(cmd, argp); 915 - mutex_unlock(&dlci_ioctl_mutex); 916 - } 917 break; 918 default: 919 err = sock->ops->ioctl(sock, cmd, arg);
··· 909 if (!dlci_ioctl_hook) 910 request_module("dlci"); 911 912 + mutex_lock(&dlci_ioctl_mutex); 913 + if (dlci_ioctl_hook) 914 err = dlci_ioctl_hook(cmd, argp); 915 + mutex_unlock(&dlci_ioctl_mutex); 916 break; 917 default: 918 err = sock->ops->ioctl(sock, cmd, arg);
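Note: re-testing dlci_ioctl_hook only after taking the mutex closes the window in which the module providing the hook could be unloaded between the check and the call. The general take-lock-then-recheck shape, with an illustrative hook pointer:

	#include <linux/errno.h>
	#include <linux/mutex.h>

	static int (*hook)(unsigned int cmd, void __user *arg);	/* set/cleared by a module */
	static DEFINE_MUTEX(hook_mutex);

	static int call_hook(unsigned int cmd, void __user *arg)
	{
		int err = -ENOPKG;

		mutex_lock(&hook_mutex);
		if (hook)			/* re-check under the lock */
			err = hook(cmd, arg);
		mutex_unlock(&hook_mutex);

		return err;
	}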
-2
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
··· 237 238 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) 239 { 240 - #ifdef RDMA_TRANSPORT_IWARP 241 if ((RDMA_TRANSPORT_IWARP == 242 rdma_node_get_transport(xprt->sc_cm_id-> 243 device->node_type)) 244 && sge_count > 1) 245 return 1; 246 else 247 - #endif 248 return min_t(int, sge_count, xprt->sc_max_sge); 249 } 250
··· 237 238 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) 239 { 240 if ((RDMA_TRANSPORT_IWARP == 241 rdma_node_get_transport(xprt->sc_cm_id-> 242 device->node_type)) 243 && sge_count > 1) 244 return 1; 245 else 246 return min_t(int, sge_count, xprt->sc_max_sge); 247 } 248