Merge tag 'x86-urgent-2025-03-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull more x86 fixes from Ingo Molnar:

- Add more model IDs to the AMD microcode version check, as more people
are hitting these checks

- Fix a Xen guest boot warning related to AMD northbridge setup

- Fix SEV guest bugs related to recent changes in its locking logic

- Fix a missing definition of PTRS_PER_PMD that assembly builds can hit

* tag 'x86-urgent-2025-03-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/microcode/AMD: Add some forgotten models to the SHA check
x86/mm: Define PTRS_PER_PMD for assembly code too
virt: sev-guest: Move SNP Guest Request data pages handling under snp_cmd_mutex
virt: sev-guest: Allocate request data dynamically
x86/amd_nb: Use rdmsr_safe() in amd_get_mmconfig_range()

+67 -43
+8 -15
arch/x86/coco/sev/core.c
··· 2853 if (!mdesc->response) 2854 goto e_free_request; 2855 2856 - mdesc->certs_data = alloc_shared_pages(SEV_FW_BLOB_MAX_SIZE); 2857 - if (!mdesc->certs_data) 2858 - goto e_free_response; 2859 - 2860 - /* initial the input address for guest request */ 2861 - mdesc->input.req_gpa = __pa(mdesc->request); 2862 - mdesc->input.resp_gpa = __pa(mdesc->response); 2863 - mdesc->input.data_gpa = __pa(mdesc->certs_data); 2864 - 2865 return mdesc; 2866 2867 - e_free_response: 2868 - free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); 2869 e_free_request: 2870 free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); 2871 e_unmap: ··· 2874 kfree(mdesc->ctx); 2875 free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); 2876 free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); 2877 - free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE); 2878 iounmap((__force void __iomem *)mdesc->secrets); 2879 2880 memset(mdesc, 0, sizeof(*mdesc)); ··· 3042 * sequence number must be incremented or the VMPCK must be deleted to 3043 * prevent reuse of the IV. 3044 */ 3045 - rc = snp_issue_guest_request(req, &mdesc->input, rio); 3046 switch (rc) { 3047 case -ENOSPC: 3048 /* ··· 3052 * order to increment the sequence number and thus avoid 3053 * IV reuse. 3054 */ 3055 - override_npages = mdesc->input.data_npages; 3056 req->exit_code = SVM_VMGEXIT_GUEST_REQUEST; 3057 3058 /* ··· 3108 } 3109 3110 if (override_npages) 3111 - mdesc->input.data_npages = override_npages; 3112 3113 return rc; 3114 } ··· 3145 * request page. 3146 */ 3147 memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request)); 3148 3149 rc = __handle_guest_request(mdesc, req, rio); 3150 if (rc) {
··· 2853 if (!mdesc->response) 2854 goto e_free_request; 2855 2856 return mdesc; 2857 2858 e_free_request: 2859 free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); 2860 e_unmap: ··· 2885 kfree(mdesc->ctx); 2886 free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); 2887 free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); 2888 iounmap((__force void __iomem *)mdesc->secrets); 2889 2890 memset(mdesc, 0, sizeof(*mdesc)); ··· 3054 * sequence number must be incremented or the VMPCK must be deleted to 3055 * prevent reuse of the IV. 3056 */ 3057 + rc = snp_issue_guest_request(req, &req->input, rio); 3058 switch (rc) { 3059 case -ENOSPC: 3060 /* ··· 3064 * order to increment the sequence number and thus avoid 3065 * IV reuse. 3066 */ 3067 + override_npages = req->input.data_npages; 3068 req->exit_code = SVM_VMGEXIT_GUEST_REQUEST; 3069 3070 /* ··· 3120 } 3121 3122 if (override_npages) 3123 + req->input.data_npages = override_npages; 3124 3125 return rc; 3126 } ··· 3157 * request page. 3158 */ 3159 memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request)); 3160 + 3161 + /* Initialize the input address for guest request */ 3162 + req->input.req_gpa = __pa(mdesc->request); 3163 + req->input.resp_gpa = __pa(mdesc->response); 3164 + req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0; 3165 3166 rc = __handle_guest_request(mdesc, req, rio); 3167 if (rc) {
+4 -4
arch/x86/include/asm/pgtable-2level_types.h
··· 23 #define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED 24 25 /* 26 - * traditional i386 two-level paging structure: 27 */ 28 29 #define PGDIR_SHIFT 22 30 #define PTRS_PER_PGD 1024 31 32 - 33 /* 34 - * the i386 is two-level, so we don't really have any 35 - * PMD directory physically. 36 */ 37 38 #define PTRS_PER_PTE 1024 39
··· 23 #define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED 24 25 /* 26 + * Traditional i386 two-level paging structure: 27 */ 28 29 #define PGDIR_SHIFT 22 30 #define PTRS_PER_PGD 1024 31 32 /* 33 + * The i386 is two-level, so we don't really have any 34 + * PMD directory physically: 35 */ 36 + #define PTRS_PER_PMD 1 37 38 #define PTRS_PER_PTE 1024 39
+3 -3
arch/x86/include/asm/sev.h
··· 203 unsigned int vmpck_id; 204 u8 msg_version; 205 u8 msg_type; 206 }; 207 208 /* ··· 266 struct snp_guest_msg secret_request, secret_response; 267 268 struct snp_secrets_page *secrets; 269 - struct snp_req_data input; 270 - 271 - void *certs_data; 272 273 struct aesgcm_ctx *ctx; 274
··· 203 unsigned int vmpck_id; 204 u8 msg_version; 205 u8 msg_type; 206 + 207 + struct snp_req_data input; 208 + void *certs_data; 209 }; 210 211 /* ··· 263 struct snp_guest_msg secret_request, secret_response; 264 265 struct snp_secrets_page *secrets; 266 267 struct aesgcm_ctx *ctx; 268
+3 -6
arch/x86/kernel/amd_nb.c
··· 143 144 struct resource *amd_get_mmconfig_range(struct resource *res) 145 { 146 - u32 address; 147 u64 base, msr; 148 unsigned int segn_busn_bits; 149 ··· 150 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 151 return NULL; 152 153 - /* assume all cpus from fam10h have mmconfig */ 154 - if (boot_cpu_data.x86 < 0x10) 155 return NULL; 156 - 157 - address = MSR_FAM10H_MMIO_CONF_BASE; 158 - rdmsrl(address, msr); 159 160 /* mmconfig is not enabled */ 161 if (!(msr & FAM10H_MMIO_CONF_ENABLE))
··· 143 144 struct resource *amd_get_mmconfig_range(struct resource *res) 145 { 146 u64 base, msr; 147 unsigned int segn_busn_bits; 148 ··· 151 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 152 return NULL; 153 154 + /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */ 155 + if (boot_cpu_data.x86 < 0x10 || 156 + rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr)) 157 return NULL; 158 159 /* mmconfig is not enabled */ 160 if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+6
arch/x86/kernel/cpu/microcode/amd.c
··· 175 { 176 switch (cur_rev >> 8) { 177 case 0x80012: return cur_rev <= 0x800126f; break; 178 case 0x83010: return cur_rev <= 0x830107c; break; 179 case 0x86001: return cur_rev <= 0x860010e; break; 180 case 0x86081: return cur_rev <= 0x8608108; break; 181 case 0x87010: return cur_rev <= 0x8701034; break; 182 case 0x8a000: return cur_rev <= 0x8a0000a; break; 183 case 0xa0011: return cur_rev <= 0xa0011da; break; 184 case 0xa0012: return cur_rev <= 0xa001243; break; 185 case 0xa1011: return cur_rev <= 0xa101153; break; 186 case 0xa1012: return cur_rev <= 0xa10124e; break; 187 case 0xa1081: return cur_rev <= 0xa108109; break; 188 case 0xa2010: return cur_rev <= 0xa20102f; break; 189 case 0xa2012: return cur_rev <= 0xa201212; break; 190 case 0xa6012: return cur_rev <= 0xa60120a; break; 191 case 0xa7041: return cur_rev <= 0xa704109; break; 192 case 0xa7052: return cur_rev <= 0xa705208; break; 193 case 0xa7080: return cur_rev <= 0xa708009; break; 194 case 0xa70c0: return cur_rev <= 0xa70C009; break; 195 case 0xaa002: return cur_rev <= 0xaa00218; break; 196 default: break; 197 }
··· 175 { 176 switch (cur_rev >> 8) { 177 case 0x80012: return cur_rev <= 0x800126f; break; 178 + case 0x80082: return cur_rev <= 0x800820f; break; 179 case 0x83010: return cur_rev <= 0x830107c; break; 180 case 0x86001: return cur_rev <= 0x860010e; break; 181 case 0x86081: return cur_rev <= 0x8608108; break; 182 case 0x87010: return cur_rev <= 0x8701034; break; 183 case 0x8a000: return cur_rev <= 0x8a0000a; break; 184 + case 0xa0010: return cur_rev <= 0xa00107a; break; 185 case 0xa0011: return cur_rev <= 0xa0011da; break; 186 case 0xa0012: return cur_rev <= 0xa001243; break; 187 + case 0xa0082: return cur_rev <= 0xa00820e; break; 188 case 0xa1011: return cur_rev <= 0xa101153; break; 189 case 0xa1012: return cur_rev <= 0xa10124e; break; 190 case 0xa1081: return cur_rev <= 0xa108109; break; 191 case 0xa2010: return cur_rev <= 0xa20102f; break; 192 case 0xa2012: return cur_rev <= 0xa201212; break; 193 + case 0xa4041: return cur_rev <= 0xa404109; break; 194 + case 0xa5000: return cur_rev <= 0xa500013; break; 195 case 0xa6012: return cur_rev <= 0xa60120a; break; 196 case 0xa7041: return cur_rev <= 0xa704109; break; 197 case 0xa7052: return cur_rev <= 0xa705208; break; 198 case 0xa7080: return cur_rev <= 0xa708009; break; 199 case 0xa70c0: return cur_rev <= 0xa70C009; break; 200 + case 0xaa001: return cur_rev <= 0xaa00116; break; 201 case 0xaa002: return cur_rev <= 0xaa00218; break; 202 default: break; 203 }
+43 -15
drivers/virt/coco/sev-guest/sev-guest.c
··· 38 struct miscdevice misc; 39 40 struct snp_msg_desc *msg_desc; 41 - 42 - union { 43 - struct snp_report_req report; 44 - struct snp_derived_key_req derived_key; 45 - struct snp_ext_report_req ext_report; 46 - } req; 47 }; 48 49 /* ··· 65 66 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 67 { 68 - struct snp_report_req *report_req = &snp_dev->req.report; 69 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 70 struct snp_report_resp *report_resp; 71 struct snp_guest_req req = {}; ··· 73 74 if (!arg->req_data || !arg->resp_data) 75 return -EINVAL; 76 77 if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req))) 78 return -EFAULT; ··· 114 115 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 116 { 117 - struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key; 118 struct snp_derived_key_resp derived_key_resp = {0}; 119 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 120 struct snp_guest_req req = {}; ··· 132 */ 133 resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; 134 if (sizeof(buf) < resp_len) 135 return -ENOMEM; 136 137 if (copy_from_user(derived_key_req, (void __user *)arg->req_data, ··· 170 struct snp_req_resp *io) 171 172 { 173 - struct snp_ext_report_req *report_req = &snp_dev->req.ext_report; 174 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 175 struct snp_report_resp *report_resp; 176 struct snp_guest_req req = {}; 177 int ret, npages = 0, resp_len; 178 sockptr_t certs_address; 179 180 if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data)) 181 return -EINVAL; 182 183 if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req))) 184 return -EFAULT; ··· 210 * the host. If host does not supply any certs in it, then copy 211 * zeros to indicate that certificate data was not provided. 
212 */ 213 - memset(mdesc->certs_data, 0, report_req->certs_len); 214 npages = report_req->certs_len >> PAGE_SHIFT; 215 cmd: 216 /* 217 * The intermediate response buffer is used while decrypting the ··· 232 */ 233 resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize; 234 report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 235 - if (!report_resp) 236 - return -ENOMEM; 237 238 - mdesc->input.data_npages = npages; 239 240 req.msg_version = arg->msg_version; 241 req.msg_type = SNP_MSG_REPORT_REQ; ··· 252 253 /* If certs length is invalid then copy the returned length */ 254 if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) { 255 - report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT; 256 257 if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req))) 258 ret = -EFAULT; ··· 261 if (ret) 262 goto e_free; 263 264 - if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) { 265 ret = -EFAULT; 266 goto e_free; 267 } ··· 271 272 e_free: 273 kfree(report_resp); 274 return ret; 275 } 276
··· 38 struct miscdevice misc; 39 40 struct snp_msg_desc *msg_desc; 41 }; 42 43 /* ··· 71 72 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 73 { 74 + struct snp_report_req *report_req __free(kfree) = NULL; 75 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 76 struct snp_report_resp *report_resp; 77 struct snp_guest_req req = {}; ··· 79 80 if (!arg->req_data || !arg->resp_data) 81 return -EINVAL; 82 + 83 + report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT); 84 + if (!report_req) 85 + return -ENOMEM; 86 87 if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req))) 88 return -EFAULT; ··· 116 117 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 118 { 119 + struct snp_derived_key_req *derived_key_req __free(kfree) = NULL; 120 struct snp_derived_key_resp derived_key_resp = {0}; 121 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 122 struct snp_guest_req req = {}; ··· 134 */ 135 resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; 136 if (sizeof(buf) < resp_len) 137 + return -ENOMEM; 138 + 139 + derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT); 140 + if (!derived_key_req) 141 return -ENOMEM; 142 143 if (copy_from_user(derived_key_req, (void __user *)arg->req_data, ··· 168 struct snp_req_resp *io) 169 170 { 171 + struct snp_ext_report_req *report_req __free(kfree) = NULL; 172 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 173 struct snp_report_resp *report_resp; 174 struct snp_guest_req req = {}; 175 int ret, npages = 0, resp_len; 176 sockptr_t certs_address; 177 + struct page *page; 178 179 if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data)) 180 return -EINVAL; 181 + 182 + report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT); 183 + if (!report_req) 184 + return -ENOMEM; 185 186 if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req))) 187 return -EFAULT; ··· 203 * the host. 
If host does not supply any certs in it, then copy 204 * zeros to indicate that certificate data was not provided. 205 */ 206 npages = report_req->certs_len >> PAGE_SHIFT; 207 + page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 208 + get_order(report_req->certs_len)); 209 + if (!page) 210 + return -ENOMEM; 211 + 212 + req.certs_data = page_address(page); 213 + ret = set_memory_decrypted((unsigned long)req.certs_data, npages); 214 + if (ret) { 215 + pr_err("failed to mark page shared, ret=%d\n", ret); 216 + __free_pages(page, get_order(report_req->certs_len)); 217 + return -EFAULT; 218 + } 219 + 220 cmd: 221 /* 222 * The intermediate response buffer is used while decrypting the ··· 213 */ 214 resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize; 215 report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 216 + if (!report_resp) { 217 + ret = -ENOMEM; 218 + goto e_free_data; 219 + } 220 221 + req.input.data_npages = npages; 222 223 req.msg_version = arg->msg_version; 224 req.msg_type = SNP_MSG_REPORT_REQ; ··· 231 232 /* If certs length is invalid then copy the returned length */ 233 if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) { 234 + report_req->certs_len = req.input.data_npages << PAGE_SHIFT; 235 236 if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req))) 237 ret = -EFAULT; ··· 240 if (ret) 241 goto e_free; 242 243 + if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) { 244 ret = -EFAULT; 245 goto e_free; 246 } ··· 250 251 e_free: 252 kfree(report_resp); 253 + e_free_data: 254 + if (npages) { 255 + if (set_memory_encrypted((unsigned long)req.certs_data, npages)) 256 + WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n"); 257 + else 258 + __free_pages(page, get_order(report_req->certs_len)); 259 + } 260 return ret; 261 } 262