Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[IA64] make mmu_context.h and tlb.c 80-column friendly

wrap_mmu_context(), delayed_tlb_flush(), get_mmu_context() all
have an extra { } block which causes one extra level of indentation.
get_mmu_context() is particularly bad with 5 indentations to
the innermost "if". It finally gets on my nerves that I can't
keep the code within 80 columns. Remove the extra { } block
and while I'm at it, reformat all the comments to 80-column
friendly. No functional change at all with this patch.

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

authored by

Chen, Kenneth W and committed by
Tony Luck
58cd9082 dcc17d1b

+59 -54
+18 -15
arch/ia64/mm/tlb.c
··· 29 29 30 30 static struct { 31 31 unsigned long mask; /* mask of supported purge page-sizes */ 32 - unsigned long max_bits; /* log2() of largest supported purge page-size */ 32 + unsigned long max_bits; /* log2 of largest supported purge page-size */ 33 33 } purge; 34 34 35 35 struct ia64_ctx ia64_ctx = { ··· 58 58 void 59 59 wrap_mmu_context (struct mm_struct *mm) 60 60 { 61 - int i; 61 + int i, cpu; 62 62 unsigned long flush_bit; 63 63 64 64 for (i=0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) { ··· 72 72 ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap, 73 73 ia64_ctx.max_ctx, ia64_ctx.next); 74 74 75 - /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */ 76 - { 77 - int cpu = get_cpu(); /* prevent preemption/migration */ 78 - for_each_online_cpu(i) { 79 - if (i != cpu) 80 - per_cpu(ia64_need_tlb_flush, i) = 1; 81 - } 82 - put_cpu(); 83 - } 75 + /* 76 + * can't call flush_tlb_all() here because of race condition 77 + * with O(1) scheduler [EF] 78 + */ 79 + cpu = get_cpu(); /* prevent preemption/migration */ 80 + for_each_online_cpu(i) 81 + if (i != cpu) 82 + per_cpu(ia64_need_tlb_flush, i) = 1; 83 + put_cpu(); 84 84 local_flush_tlb_all(); 85 85 } 86 86 87 87 void 88 - ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits) 88 + ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, 89 + unsigned long end, unsigned long nbits) 89 90 { 90 91 static DEFINE_SPINLOCK(ptcg_lock); 91 92 ··· 134 133 } 135 134 136 135 void 137 - flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end) 136 + flush_tlb_range (struct vm_area_struct *vma, unsigned long start, 137 + unsigned long end) 138 138 { 139 139 struct mm_struct *mm = vma->vm_mm; 140 140 unsigned long size = end - start; ··· 149 147 #endif 150 148 151 149 nbits = ia64_fls(size + 0xfff); 152 - while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits)) 150 + while (unlikely (((1UL << nbits) & purge.mask) == 0) && 151 + (nbits < purge.max_bits)) 153 152 ++nbits; 154 153 if (nbits > purge.max_bits) 155 154 nbits = purge.max_bits; ··· 192 189 local_cpu_data->ptce_stride[0] = ptce_info.stride[0]; 193 190 local_cpu_data->ptce_stride[1] = ptce_info.stride[1]; 194 191 195 - local_flush_tlb_all(); /* nuke left overs from bootstrapping... */ 192 + local_flush_tlb_all(); /* nuke left overs from bootstrapping... */ 196 193 }
+41 -39
include/asm-ia64/mmu_context.h
··· 7 7 */ 8 8 9 9 /* 10 - * Routines to manage the allocation of task context numbers. Task context numbers are 11 - * used to reduce or eliminate the need to perform TLB flushes due to context switches. 12 - * Context numbers are implemented using ia-64 region ids. Since the IA-64 TLB does not 13 - * consider the region number when performing a TLB lookup, we need to assign a unique 14 - * region id to each region in a process. We use the least significant three bits in a 15 - * region id for this purpose. 10 + * Routines to manage the allocation of task context numbers. Task context 11 + * numbers are used to reduce or eliminate the need to perform TLB flushes 12 + * due to context switches. Context numbers are implemented using ia-64 13 + * region ids. Since the IA-64 TLB does not consider the region number when 14 + * performing a TLB lookup, we need to assign a unique region id to each 15 + * region in a process. We use the least significant three bits in a 16 + * region id for this purpose. 16 17 */ 17 18 18 19 #define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */ ··· 52 51 } 53 52 54 53 /* 55 - * When the context counter wraps around all TLBs need to be flushed because an old 56 - * context number might have been reused. This is signalled by the ia64_need_tlb_flush 57 - * per-CPU variable, which is checked in the routine below. Called by activate_mm(). 58 - * <efocht@ess.nec.de> 54 + * When the context counter wraps around all TLBs need to be flushed because 55 + * an old context number might have been reused. This is signalled by the 56 + * ia64_need_tlb_flush per-CPU variable, which is checked in the routine 57 + * below. Called by activate_mm(). <efocht@ess.nec.de> 59 58 */ 60 59 static inline void 61 60 delayed_tlb_flush (void) ··· 65 64 66 65 if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) { 67 66 spin_lock_irqsave(&ia64_ctx.lock, flags); 68 - { 69 - if (__ia64_per_cpu_var(ia64_need_tlb_flush)) { 70 - local_flush_tlb_all(); 71 - __ia64_per_cpu_var(ia64_need_tlb_flush) = 0; 72 - } 67 + if (__ia64_per_cpu_var(ia64_need_tlb_flush)) { 68 + local_flush_tlb_all(); 69 + __ia64_per_cpu_var(ia64_need_tlb_flush) = 0; 73 70 } 74 71 spin_unlock_irqrestore(&ia64_ctx.lock, flags); 75 72 } ··· 79 80 unsigned long flags; 80 81 nv_mm_context_t context = mm->context; 81 82 82 - if (unlikely(!context)) { 83 - spin_lock_irqsave(&ia64_ctx.lock, flags); 84 - { 85 - /* re-check, now that we've got the lock: */ 86 - context = mm->context; 87 - if (context == 0) { 88 - cpus_clear(mm->cpu_vm_mask); 89 - if (ia64_ctx.next >= ia64_ctx.limit) { 90 - ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, 91 - ia64_ctx.max_ctx, ia64_ctx.next); 92 - ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap, 93 - ia64_ctx.max_ctx, ia64_ctx.next); 94 - if (ia64_ctx.next >= ia64_ctx.max_ctx) 95 - wrap_mmu_context(mm); 96 - } 97 - mm->context = context = ia64_ctx.next++; 98 - __set_bit(context, ia64_ctx.bitmap); 99 - } 83 + if (likely(context)) 84 + goto out; 85 + 86 + spin_lock_irqsave(&ia64_ctx.lock, flags); 87 + /* re-check, now that we've got the lock: */ 88 + context = mm->context; 89 + if (context == 0) { 90 + cpus_clear(mm->cpu_vm_mask); 91 + if (ia64_ctx.next >= ia64_ctx.limit) { 92 + ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, 93 + ia64_ctx.max_ctx, ia64_ctx.next); 94 + ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap, 95 + ia64_ctx.max_ctx, ia64_ctx.next); 96 + if (ia64_ctx.next >= ia64_ctx.max_ctx) 97 + wrap_mmu_context(mm); 100 98 } 101 - spin_unlock_irqrestore(&ia64_ctx.lock, flags); 99 + mm->context = context = ia64_ctx.next++; 100 + __set_bit(context, ia64_ctx.bitmap); 102 101 } 102 + spin_unlock_irqrestore(&ia64_ctx.lock, flags); 103 + out: 103 104 /* 104 105 * Ensure we're not starting to use "context" before any old 105 106 * uses of it are gone from our TLB. ··· 110 111 } 111 112 112 113 /* 113 - * Initialize context number to some sane value. MM is guaranteed to be a brand-new 114 - * address-space, so no TLB flushing is needed, ever. 114 + * Initialize context number to some sane value. MM is guaranteed to be a 115 + * brand-new address-space, so no TLB flushing is needed, ever. 115 116 */ 116 117 static inline int 117 118 init_new_context (struct task_struct *p, struct mm_struct *mm) ··· 172 173 if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) 173 174 cpu_set(smp_processor_id(), mm->cpu_vm_mask); 174 175 reload_context(context); 175 - /* in the unlikely event of a TLB-flush by another thread, redo the load: */ 176 + /* 177 + * in the unlikely event of a TLB-flush by another thread, 178 + * redo the load. 179 + */ 176 180 } while (unlikely(context != mm->context)); 177 181 } ··· 188 186 activate_mm (struct mm_struct *prev, struct mm_struct *next) 189 187 { 190 188 /* 191 - * We may get interrupts here, but that's OK because interrupt handlers cannot 192 - * touch user-space. 189 + * We may get interrupts here, but that's OK because interrupt 190 + * handlers cannot touch user-space. 193 191 */ 194 192 ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd)); 195 193 activate_context(next);