Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Drivers: hv: Rename fields for SynIC message and event pages

Confidential VMBus requires interacting with two SynICs -- one
provided by the host hypervisor, and one provided by the paravisor.
Each SynIC requires its own message and event pages.

Rename the existing host-accessible SynIC message and event pages
by adding the "hyp_" prefix to clearly distinguish them from the
paravisor ones. The field name is also changed in mshv_root.* for
consistency.

No functional changes.

Signed-off-by: Roman Kisel <romank@linux.microsoft.com>
Reviewed-by: Tianyu Lan <tiala@microsoft.com>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Signed-off-by: Wei Liu <wei.liu@kernel.org>

authored by

Roman Kisel and committed by
Wei Liu
163224c1 a156ad8c

+45 -45
+3 -3
drivers/hv/channel_mgmt.c
··· 844 844 = per_cpu_ptr(hv_context.cpu_context, cpu); 845 845 846 846 /* 847 - * In a CoCo VM the synic_message_page is not allocated 847 + * In a CoCo VM the hyp_synic_message_page is not allocated 848 848 * in hv_synic_alloc(). Instead it is set/cleared in 849 849 * hv_synic_enable_regs() and hv_synic_disable_regs() 850 850 * such that it is set only when the CPU is online. If 851 851 * not all present CPUs are online, the message page 852 852 * might be NULL, so skip such CPUs. 853 853 */ 854 - page_addr = hv_cpu->synic_message_page; 854 + page_addr = hv_cpu->hyp_synic_message_page; 855 855 if (!page_addr) 856 856 continue; 857 857 ··· 892 892 struct hv_per_cpu_context *hv_cpu 893 893 = per_cpu_ptr(hv_context.cpu_context, cpu); 894 894 895 - page_addr = hv_cpu->synic_message_page; 895 + page_addr = hv_cpu->hyp_synic_message_page; 896 896 if (!page_addr) 897 897 continue; 898 898
+33 -33
drivers/hv/hv.c
··· 147 147 * Skip these pages allocation here. 148 148 */ 149 149 if (!ms_hyperv.paravisor_present && !hv_root_partition()) { 150 - hv_cpu->synic_message_page = 150 + hv_cpu->hyp_synic_message_page = 151 151 (void *)get_zeroed_page(GFP_ATOMIC); 152 - if (!hv_cpu->synic_message_page) { 152 + if (!hv_cpu->hyp_synic_message_page) { 153 153 pr_err("Unable to allocate SYNIC message page\n"); 154 154 goto err; 155 155 } 156 156 157 - hv_cpu->synic_event_page = 157 + hv_cpu->hyp_synic_event_page = 158 158 (void *)get_zeroed_page(GFP_ATOMIC); 159 - if (!hv_cpu->synic_event_page) { 159 + if (!hv_cpu->hyp_synic_event_page) { 160 160 pr_err("Unable to allocate SYNIC event page\n"); 161 161 162 - free_page((unsigned long)hv_cpu->synic_message_page); 163 - hv_cpu->synic_message_page = NULL; 162 + free_page((unsigned long)hv_cpu->hyp_synic_message_page); 163 + hv_cpu->hyp_synic_message_page = NULL; 164 164 goto err; 165 165 } 166 166 } ··· 168 168 if (!ms_hyperv.paravisor_present && 169 169 (hv_isolation_type_snp() || hv_isolation_type_tdx())) { 170 170 ret = set_memory_decrypted((unsigned long) 171 - hv_cpu->synic_message_page, 1); 171 + hv_cpu->hyp_synic_message_page, 1); 172 172 if (ret) { 173 173 pr_err("Failed to decrypt SYNIC msg page: %d\n", ret); 174 - hv_cpu->synic_message_page = NULL; 174 + hv_cpu->hyp_synic_message_page = NULL; 175 175 176 176 /* 177 177 * Free the event page here so that hv_synic_free() 178 178 * won't later try to re-encrypt it. 
179 179 */ 180 - free_page((unsigned long)hv_cpu->synic_event_page); 181 - hv_cpu->synic_event_page = NULL; 180 + free_page((unsigned long)hv_cpu->hyp_synic_event_page); 181 + hv_cpu->hyp_synic_event_page = NULL; 182 182 goto err; 183 183 } 184 184 185 185 ret = set_memory_decrypted((unsigned long) 186 - hv_cpu->synic_event_page, 1); 186 + hv_cpu->hyp_synic_event_page, 1); 187 187 if (ret) { 188 188 pr_err("Failed to decrypt SYNIC event page: %d\n", ret); 189 - hv_cpu->synic_event_page = NULL; 189 + hv_cpu->hyp_synic_event_page = NULL; 190 190 goto err; 191 191 } 192 192 193 - memset(hv_cpu->synic_message_page, 0, PAGE_SIZE); 194 - memset(hv_cpu->synic_event_page, 0, PAGE_SIZE); 193 + memset(hv_cpu->hyp_synic_message_page, 0, PAGE_SIZE); 194 + memset(hv_cpu->hyp_synic_event_page, 0, PAGE_SIZE); 195 195 } 196 196 } ··· 227 227 228 228 if (!ms_hyperv.paravisor_present && 229 229 (hv_isolation_type_snp() || hv_isolation_type_tdx())) { 230 - if (hv_cpu->synic_message_page) { 230 + if (hv_cpu->hyp_synic_message_page) { 231 231 ret = set_memory_encrypted((unsigned long) 232 - hv_cpu->synic_message_page, 1); 232 + hv_cpu->hyp_synic_message_page, 1); 233 233 if (ret) { 234 234 pr_err("Failed to encrypt SYNIC msg page: %d\n", ret); 235 - hv_cpu->synic_message_page = NULL; 235 + hv_cpu->hyp_synic_message_page = NULL; 236 236 } 237 237 } 238 238 239 - if (hv_cpu->synic_event_page) { 239 + if (hv_cpu->hyp_synic_event_page) { 240 240 ret = set_memory_encrypted((unsigned long) 241 - hv_cpu->synic_event_page, 1); 241 + hv_cpu->hyp_synic_event_page, 1); 242 242 if (ret) { 243 243 pr_err("Failed to encrypt SYNIC event page: %d\n", ret); 244 - hv_cpu->synic_event_page = NULL; 244 + hv_cpu->hyp_synic_event_page = NULL; 245 245 } 246 246 } 247 247 } 248 248 249 249 free_page((unsigned long)hv_cpu->post_msg_page); 250 - free_page((unsigned long)hv_cpu->synic_event_page); 251 - free_page((unsigned long)hv_cpu->synic_message_page); 250 + free_page((unsigned long)hv_cpu->hyp_synic_event_page); 251 + free_page((unsigned long)hv_cpu->hyp_synic_message_page); 252 252 } 253 253 254 254 kfree(hv_context.hv_numa_map); ··· 278 278 /* Mask out vTOM bit. ioremap_cache() maps decrypted */ 279 279 u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) & 280 280 ~ms_hyperv.shared_gpa_boundary; 281 - hv_cpu->synic_message_page = 281 + hv_cpu->hyp_synic_message_page = 282 282 (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 283 - if (!hv_cpu->synic_message_page) 283 + if (!hv_cpu->hyp_synic_message_page) 284 284 pr_err("Fail to map synic message page.\n"); 285 285 } else { 286 - simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page) 286 + simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page) 287 287 >> HV_HYP_PAGE_SHIFT; 288 288 } ··· 297 297 /* Mask out vTOM bit. ioremap_cache() maps decrypted */ 298 298 u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) & 299 299 ~ms_hyperv.shared_gpa_boundary; 300 - hv_cpu->synic_event_page = 300 + hv_cpu->hyp_synic_event_page = 301 301 (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE); 302 - if (!hv_cpu->synic_event_page) 302 + if (!hv_cpu->hyp_synic_event_page) 303 303 pr_err("Fail to map synic event page.\n"); 304 304 } else { 305 - siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page) 305 + siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page) 306 306 >> HV_HYP_PAGE_SHIFT; 307 307 } ··· 362 362 */ 363 363 simp.simp_enabled = 0; 364 364 if (ms_hyperv.paravisor_present || hv_root_partition()) { 365 - iounmap(hv_cpu->synic_message_page); 366 - hv_cpu->synic_message_page = NULL; 365 + iounmap(hv_cpu->hyp_synic_message_page); 366 + hv_cpu->hyp_synic_message_page = NULL; 367 367 } else { 368 368 simp.base_simp_gpa = 0; 369 369 } ··· 374 374 siefp.siefp_enabled = 0; 375 375 376 376 if (ms_hyperv.paravisor_present || hv_root_partition()) { 377 - iounmap(hv_cpu->synic_event_page); 378 - hv_cpu->synic_event_page = NULL; 377 + iounmap(hv_cpu->hyp_synic_event_page); 378 + hv_cpu->hyp_synic_event_page = NULL; 379 379 } else { 380 380 siefp.base_siefp_gpa = 0; 381 381 } ··· 405 405 { 406 406 struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context); 407 407 union hv_synic_event_flags *event = 408 - (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT; 408 + (union hv_synic_event_flags *)hv_cpu->hyp_synic_event_page + VMBUS_MESSAGE_SINT; 409 409 unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */ 410 410 bool pending; 411 411 u32 relid;
+2 -2
drivers/hv/hyperv_vmbus.h
··· 121 121 * Per cpu state for channel handling 122 122 */ 123 123 struct hv_per_cpu_context { 124 - void *synic_message_page; 125 - void *synic_event_page; 124 + void *hyp_synic_message_page; 125 + void *hyp_synic_event_page; 126 126 127 127 /* 128 128 * The page is only used in hv_post_message() for a TDX VM (with the
+1 -1
drivers/hv/mshv_root.h
··· 169 169 }; 170 170 171 171 struct hv_synic_pages { 172 - struct hv_message_page *synic_message_page; 172 + struct hv_message_page *hyp_synic_message_page; 173 173 struct hv_synic_event_flags_page *synic_event_flags_page; 174 174 struct hv_synic_event_ring_page *synic_event_ring_page; 175 175 };
+3 -3
drivers/hv/mshv_synic.c
··· 394 394 void mshv_isr(void) 395 395 { 396 396 struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages); 397 - struct hv_message_page **msg_page = &spages->synic_message_page; 397 + struct hv_message_page **msg_page = &spages->hyp_synic_message_page; 398 398 struct hv_message *msg; 399 399 bool handled; 400 400 ··· 456 456 #endif 457 457 union hv_synic_scontrol sctrl; 458 458 struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages); 459 - struct hv_message_page **msg_page = &spages->synic_message_page; 459 + struct hv_message_page **msg_page = &spages->hyp_synic_message_page; 460 460 struct hv_synic_event_flags_page **event_flags_page = 461 461 &spages->synic_event_flags_page; 462 462 struct hv_synic_event_ring_page **event_ring_page = ··· 550 550 union hv_synic_sirbp sirbp; 551 551 union hv_synic_scontrol sctrl; 552 552 struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages); 553 - struct hv_message_page **msg_page = &spages->synic_message_page; 553 + struct hv_message_page **msg_page = &spages->hyp_synic_message_page; 554 554 struct hv_synic_event_flags_page **event_flags_page = 555 555 &spages->synic_event_flags_page; 556 556 struct hv_synic_event_ring_page **event_ring_page =
+3 -3
drivers/hv/vmbus_drv.c
··· 1060 1060 void vmbus_on_msg_dpc(unsigned long data) 1061 1061 { 1062 1062 struct hv_per_cpu_context *hv_cpu = (void *)data; 1063 - void *page_addr = hv_cpu->synic_message_page; 1063 + void *page_addr = hv_cpu->hyp_synic_message_page; 1064 1064 struct hv_message msg_copy, *msg = (struct hv_message *)page_addr + 1065 1065 VMBUS_MESSAGE_SINT; 1066 1066 struct vmbus_channel_message_header *hdr; ··· 1244 1244 * The event page can be directly checked to get the id of 1245 1245 * the channel that has the interrupt pending. 1246 1246 */ 1247 - void *page_addr = hv_cpu->synic_event_page; 1247 + void *page_addr = hv_cpu->hyp_synic_event_page; 1248 1248 union hv_synic_event_flags *event 1249 1249 = (union hv_synic_event_flags *)page_addr + 1250 1250 VMBUS_MESSAGE_SINT; ··· 1327 1327 1328 1328 vmbus_chan_sched(hv_cpu); 1329 1329 1330 - page_addr = hv_cpu->synic_message_page; 1330 + page_addr = hv_cpu->hyp_synic_message_page; 1331 1331 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; 1332 1332 1333 1333 /* Check if there are actual msgs to be processed */