Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Ingo writes:
"x86 fixes

An intel_rdt memory access fix and a VLA fix in pgd_alloc()."

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Avoid VLA in pgd_alloc()
x86/intel_rdt: Fix out-of-bounds memory access in CBM tests

+45 -27
+3 -3
arch/x86/kernel/cpu/intel_rdt.h
@@ -529,14 +529,14 @@
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			   struct seq_file *s, void *v);
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive);
+			   unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
-				  u32 cbm);
+				  unsigned long cbm);
 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
 int rdtgroup_tasks_assigned(struct rdtgroup *r);
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
 int rdt_pseudo_lock_init(void);
 void rdt_pseudo_lock_release(void);
+11 -9
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -797,25 +797,27 @@
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *cbm_b;
 	unsigned int cbm_len;
+	unsigned long cbm_b;
 
 	if (d->plr) {
 		cbm_len = d->plr->r->cache.cbm_len;
-		cbm_b = (unsigned long *)&d->plr->cbm;
-		if (bitmap_intersects(cbm, cbm_b, cbm_len))
+		cbm_b = d->plr->cbm;
+		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
 			return true;
 	}
 	return false;
+23 -13
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -975,33 +975,34 @@
  * is false then overlaps with any resource group or hardware entities
  * will be considered.
  *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
  * Return: false if CBM does not overlap, true if it does.
  */
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive)
+			   unsigned long cbm, int closid, bool exclusive)
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *ctrl_b;
 	enum rdtgrp_mode mode;
+	unsigned long ctrl_b;
 	u32 *ctrl;
 	int i;
 
 	/* Check for any overlap with regions used by hardware directly */
 	if (!exclusive) {
-		if (bitmap_intersects(cbm,
-				      (unsigned long *)&r->cache.shareable_bits,
-				      r->cache.cbm_len))
+		ctrl_b = r->cache.shareable_bits;
+		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
 			return true;
 	}
 
 	/* Check for overlap with other resource groups */
 	ctrl = d->ctrl_val;
 	for (i = 0; i < closids_supported(); i++, ctrl++) {
-		ctrl_b = (unsigned long *)ctrl;
+		ctrl_b = *ctrl;
 		mode = rdtgroup_mode_by_closid(i);
 		if (closid_allocated(i) && i != closid &&
 		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-			if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
 				if (exclusive) {
 					if (mode == RDT_MODE_EXCLUSIVE)
 						return true;
@@ -1139,15 +1138,18 @@
  * computed by first dividing the total cache size by the CBM length to
  * determine how many bytes each bit in the bitmask represents. The result
  * is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used to make the
+ * bitmap functions work correctly.
  */
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
-				  struct rdt_domain *d, u32 cbm)
+				  struct rdt_domain *d, unsigned long cbm)
 {
 	struct cpu_cacheinfo *ci;
 	unsigned int size = 0;
 	int num_b, i;
 
-	num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
 	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
 	for (i = 0; i < ci->num_leaves; i++) {
 		if (ci->info_list[i].level == r->cache_level) {
@@ -2357,6 +2353,7 @@
 	u32 used_b = 0, unused_b = 0;
 	u32 closid = rdtgrp->closid;
 	struct rdt_resource *r;
+	unsigned long tmp_cbm;
 	enum rdtgrp_mode mode;
 	struct rdt_domain *d;
 	int i, ret;
@@ -2395,9 +2390,14 @@
 		 * modify the CBM based on system availability.
 		 */
 		cbm_ensure_valid(&d->new_ctrl, r);
-		if (bitmap_weight((unsigned long *) &d->new_ctrl,
-				  r->cache.cbm_len) <
-		    r->cache.min_cbm_bits) {
+		/*
+		 * Assign the u32 CBM to an unsigned long to ensure
+		 * that bitmap_weight() does not access out-of-bound
+		 * memory.
+		 */
+		tmp_cbm = d->new_ctrl;
+		if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+		    r->cache.min_cbm_bits) {
			rdt_last_cmd_printf("no space on %s:%d\n",
					    r->name, d->id);
			return -ENOSPC;
+8 -2
arch/x86/mm/pgtable.c
@@ -115,6 +115,8 @@
 
 #define UNSHARED_PTRS_PER_PGD				\
 	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD			\
+	max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
 
 
 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -183,6 +181,7 @@
  * and initialize the kernel pmds here.
  */
 #define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD
 
 /*
  * We allocate separate PMDs for the kernel part of the user page-table
@@ -192,6 +189,7 @@
  */
 #define PREALLOCATED_USER_PMDS	 (static_cpu_has(X86_FEATURE_PTI) ? \
 					KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
@@ -214,7 +210,9 @@
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS	0
+#define MAX_PREALLOCATED_PMDS	0
 #define PREALLOCATED_USER_PMDS	0
+#define MAX_PREALLOCATED_USER_PMDS 0
 #endif	/* CONFIG_X86_PAE */
 
 static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -434,8 +428,8 @@
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
-	pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
-	pmd_t *pmds[PREALLOCATED_PMDS];
+	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+	pmd_t *pmds[MAX_PREALLOCATED_PMDS];
 
 	pgd = _pgd_alloc();
 