Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/ldt: Rename ldt_struct::size to ::nr_entries

... because this is exactly what it is: the number of entries in the
LDT. Calling it "size" is simply confusing and it is actually begging
to be called "nr_entries" or somesuch, especially if you see constructs
like:

alloc_size = size * LDT_ENTRY_SIZE;

since LDT_ENTRY_SIZE is the size of a single entry.

There should be no functionality change resulting from this patch, as
the before/after output from tools/testing/selftests/x86/ldt_gdt.c
shows.

Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170606173116.13977-1-bp@alien8.de
[ Renamed 'n_entries' to 'nr_entries' ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by: Borislav Petkov
Committed by: Ingo Molnar
bbf79d21 5dd0b16c

+31 -30
+1 -1
arch/x86/events/core.c
··· 2333 2333 2334 2334 /* IRQs are off, so this synchronizes with smp_store_release */ 2335 2335 ldt = lockless_dereference(current->active_mm->context.ldt); 2336 - if (!ldt || idx > ldt->size) 2336 + if (!ldt || idx > ldt->nr_entries) 2337 2337 return 0; 2338 2338 2339 2339 desc = &ldt->entries[idx];
+2 -2
arch/x86/include/asm/mmu_context.h
··· 47 47 * allocations, but it's not worth trying to optimize. 48 48 */ 49 49 struct desc_struct *entries; 50 - unsigned int size; 50 + unsigned int nr_entries; 51 51 }; 52 52 53 53 /* ··· 87 87 */ 88 88 89 89 if (unlikely(ldt)) 90 - set_ldt(ldt->entries, ldt->size); 90 + set_ldt(ldt->entries, ldt->nr_entries); 91 91 else 92 92 clear_LDT(); 93 93 #else
+25 -24
arch/x86/kernel/ldt.c
··· 31 31 return; 32 32 33 33 pc = &mm->context; 34 - set_ldt(pc->ldt->entries, pc->ldt->size); 34 + set_ldt(pc->ldt->entries, pc->ldt->nr_entries); 35 35 } 36 36 37 37 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ 38 - static struct ldt_struct *alloc_ldt_struct(unsigned int size) 38 + static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries) 39 39 { 40 40 struct ldt_struct *new_ldt; 41 41 unsigned int alloc_size; 42 42 43 - if (size > LDT_ENTRIES) 43 + if (num_entries > LDT_ENTRIES) 44 44 return NULL; 45 45 46 46 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); ··· 48 48 return NULL; 49 49 50 50 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); 51 - alloc_size = size * LDT_ENTRY_SIZE; 51 + alloc_size = num_entries * LDT_ENTRY_SIZE; 52 52 53 53 /* 54 54 * Xen is very picky: it requires a page-aligned LDT that has no ··· 66 66 return NULL; 67 67 } 68 68 69 - new_ldt->size = size; 69 + new_ldt->nr_entries = num_entries; 70 70 return new_ldt; 71 71 } 72 72 73 73 /* After calling this, the LDT is immutable. */ 74 74 static void finalize_ldt_struct(struct ldt_struct *ldt) 75 75 { 76 - paravirt_alloc_ldt(ldt->entries, ldt->size); 76 + paravirt_alloc_ldt(ldt->entries, ldt->nr_entries); 77 77 } 78 78 79 79 /* context.lock is held */ ··· 92 92 if (likely(!ldt)) 93 93 return; 94 94 95 - paravirt_free_ldt(ldt->entries, ldt->size); 96 - if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) 95 + paravirt_free_ldt(ldt->entries, ldt->nr_entries); 96 + if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE) 97 97 vfree_atomic(ldt->entries); 98 98 else 99 99 free_page((unsigned long)ldt->entries); ··· 123 123 goto out_unlock; 124 124 } 125 125 126 - new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); 126 + new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries); 127 127 if (!new_ldt) { 128 128 retval = -ENOMEM; 129 129 goto out_unlock; 130 130 } 131 131 132 132 memcpy(new_ldt->entries, old_mm->context.ldt->entries, 133 - new_ldt->size * LDT_ENTRY_SIZE); 133 + new_ldt->nr_entries * LDT_ENTRY_SIZE); 134 134 finalize_ldt_struct(new_ldt); 135 135 136 136 mm->context.ldt = new_ldt; ··· 153 153 154 154 static int read_ldt(void __user *ptr, unsigned long bytecount) 155 155 { 156 - int retval; 157 - unsigned long size; 158 156 struct mm_struct *mm = current->mm; 157 + unsigned long entries_size; 158 + int retval; 159 159 160 160 mutex_lock(&mm->context.lock); 161 161 ··· 167 167 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) 168 168 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; 169 169 170 - size = mm->context.ldt->size * LDT_ENTRY_SIZE; 171 - if (size > bytecount) 172 - size = bytecount; 170 + entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE; 171 + if (entries_size > bytecount) 172 + entries_size = bytecount; 173 173 174 - if (copy_to_user(ptr, mm->context.ldt->entries, size)) { 174 + if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) { 175 175 retval = -EFAULT; 176 176 goto out_unlock; 177 177 } 178 178 179 - if (size != bytecount) { 179 + if (entries_size != bytecount) { 180 180 /* Zero-fill the rest and pretend we read bytecount bytes. */ 181 - if (clear_user(ptr + size, bytecount - size)) { 181 + if (clear_user(ptr + entries_size, bytecount - entries_size)) { 182 182 retval = -EFAULT; 183 183 goto out_unlock; 184 184 } ··· 209 209 { 210 210 struct mm_struct *mm = current->mm; 211 211 struct ldt_struct *new_ldt, *old_ldt; 212 - unsigned int oldsize, newsize; 212 + unsigned int old_nr_entries, new_nr_entries; 213 213 struct user_desc ldt_info; 214 214 struct desc_struct ldt; 215 215 int error; ··· 248 248 249 249 mutex_lock(&mm->context.lock); 250 250 251 - old_ldt = mm->context.ldt; 252 - oldsize = old_ldt ? old_ldt->size : 0; 253 - newsize = max(ldt_info.entry_number + 1, oldsize); 251 + old_ldt = mm->context.ldt; 252 + old_nr_entries = old_ldt ? old_ldt->nr_entries : 0; 253 + new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries); 254 254 255 255 error = -ENOMEM; 256 - new_ldt = alloc_ldt_struct(newsize); 256 + new_ldt = alloc_ldt_struct(new_nr_entries); 257 257 if (!new_ldt) 258 258 goto out_unlock; 259 259 260 260 if (old_ldt) 261 - memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); 261 + memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE); 262 + 262 263 new_ldt->entries[ldt_info.entry_number] = ldt; 263 264 finalize_ldt_struct(new_ldt); 264 265
+1 -1
arch/x86/kernel/process_64.c
··· 142 142 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", 143 143 dead_task->comm, 144 144 dead_task->mm->context.ldt->entries, 145 - dead_task->mm->context.ldt->size); 145 + dead_task->mm->context.ldt->nr_entries); 146 146 BUG(); 147 147 } 148 148 #endif
+1 -1
arch/x86/kernel/step.c
··· 34 34 35 35 mutex_lock(&child->mm->context.lock); 36 36 if (unlikely(!child->mm->context.ldt || 37 - seg >= child->mm->context.ldt->size)) 37 + seg >= child->mm->context.ldt->nr_entries)) 38 38 addr = -1L; /* bogus selector, access would fault */ 39 39 else { 40 40 desc = &child->mm->context.ldt->entries[seg];
+1 -1
arch/x86/math-emu/fpu_system.h
··· 27 27 #ifdef CONFIG_MODIFY_LDT_SYSCALL 28 28 seg >>= 3; 29 29 mutex_lock(&current->mm->context.lock); 30 - if (current->mm->context.ldt && seg < current->mm->context.ldt->size) 30 + if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries) 31 31 ret = current->mm->context.ldt->entries[seg]; 32 32 mutex_unlock(&current->mm->context.lock); 33 33 #endif