// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include <linux/set_memory.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;
EXPORT_SYMBOL_FOR_MODULES(hv_context, "mshv_vtl");

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	unsigned long flags;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	local_irq_save(flags);

	/*
	 * A TDX VM with the paravisor must use the decrypted post_msg_page: see
	 * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
	 * can use the encrypted hyperv_pcpu_input_arg because it copies the
	 * input into the GHCB page, which has been decrypted by the paravisor.
	 */
	if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
		aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
	else
		aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	if (ms_hyperv.paravisor_present && !vmbus_is_confidential()) {
		/*
		 * If the VMBus isn't confidential, use the CoCo-specific
		 * mechanism to communicate with the hypervisor.
		 */
		if (hv_isolation_type_tdx())
			status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
						  virt_to_phys(aligned_msg), 0);
		else if (hv_isolation_type_snp())
			status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
						   aligned_msg, NULL,
						   sizeof(*aligned_msg));
		else
			status = HV_STATUS_INVALID_PARAMETER;
	} else {
		u64 control = HVCALL_POST_MESSAGE;

		control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
		/*
		 * If there is no paravisor, this will go to the hypervisor.
		 * In the Confidential VMBus case, there is the paravisor
		 * to which this will trap.
		 */
		status = hv_do_hypercall(control, aligned_msg, NULL);
	}

	local_irq_restore(flags);

	return hv_result(status);
}
EXPORT_SYMBOL_FOR_MODULES(hv_post_message, "mshv_vtl");

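/*
 * Note: hv_alloc_page()/hv_free_page() below manage single pages whose
 * encryption state may have to change in a CoCo VM. A page that was
 * transitioned to decrypted (host-visible) must be transitioned back to
 * encrypted before it may safely be handed back to the page allocator,
 * which is why hv_free_page() deliberately leaks the page if
 * re-encryption fails.
 */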
static int hv_alloc_page(void **page, bool decrypt, const char *note)
{
	int ret = 0;

	/*
	 * After the page changes its encryption status, its contents might
	 * appear scrambled on some hardware. Thus `get_zeroed_page` would
	 * zero the page out in vain, so do that explicitly exactly once.
	 *
	 * By default, the page is allocated encrypted in a CoCo VM.
	 */
	*page = (void *)__get_free_page(GFP_KERNEL);
	if (!*page)
		return -ENOMEM;

	if (decrypt)
		ret = set_memory_decrypted((unsigned long)*page, 1);
	if (ret)
		goto failed;

	memset(*page, 0, PAGE_SIZE);
	return 0;

failed:
	/*
	 * Report the failure but don't put the page back on the free list as
	 * its encryption status is unknown.
	 */
	pr_err("allocation failed for %s page, error %d, decrypted %d\n",
	       note, ret, decrypt);
	*page = NULL;
	return ret;
}

static int hv_free_page(void **page, bool encrypt, const char *note)
{
	int ret = 0;

	if (!*page)
		return 0;

	if (encrypt)
		ret = set_memory_encrypted((unsigned long)*page, 1);

	/*
	 * In the case of the failure, the page is leaked. Something is wrong,
	 * prefer to lose the page with the unknown encryption status and stay afloat.
	 */
	if (ret)
		pr_err("deallocation failed for %s page, error %d, encrypt %d\n",
		       note, ret, encrypt);
	else
		free_page((unsigned long)*page);

	*page = NULL;

	return ret;
}

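/*
 * Overview of the per-CPU pages set up below (see struct hv_per_cpu_context):
 *   - post_msg_page: decrypted page used as the HVCALL_POST_MESSAGE input on
 *     TDX VMs with a paravisor.
 *   - hyp_synic_message_page / hyp_synic_event_page: SIM/SIEF pages shared
 *     with the hypervisor; only allocated here when there is no paravisor
 *     and this is not the root partition, otherwise they are mapped from the
 *     SIMP/SIEFP registers in hv_hyp_synic_enable_regs().
 *   - para_synic_message_page / para_synic_event_page: pages used with the
 *     paravisor when Confidential VMBus is in use; these are left encrypted.
 */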
int hv_synic_alloc(void)
{
	int cpu, ret = -ENOMEM;
	struct hv_per_cpu_context *hv_cpu;
	const bool decrypt = !vmbus_is_confidential();

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and cleanup properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (!hv_context.hv_numa_map) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long)hv_cpu);

		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
			ret = hv_alloc_page(&hv_cpu->post_msg_page,
					    decrypt, "post msg");
			if (ret)
				goto err;
		}

		/*
		 * If these SynIC pages are not allocated, SIEF and SIM pages
		 * are configured using what the root partition or the paravisor
		 * provides upon reading the SIEFP and SIMP registers.
		 */
		if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
			ret = hv_alloc_page(&hv_cpu->hyp_synic_message_page,
					    decrypt, "hypervisor SynIC msg");
			if (ret)
				goto err;
			ret = hv_alloc_page(&hv_cpu->hyp_synic_event_page,
					    decrypt, "hypervisor SynIC event");
			if (ret)
				goto err;
		}

		if (vmbus_is_confidential()) {
			ret = hv_alloc_page(&hv_cpu->para_synic_message_page,
					    false, "paravisor SynIC msg");
			if (ret)
				goto err;
			ret = hv_alloc_page(&hv_cpu->para_synic_event_page,
					    false, "paravisor SynIC event");
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free()
	 */
	return ret;
}

void hv_synic_free(void)
{
	int cpu;
	const bool encrypt = !vmbus_is_confidential();

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu =
			per_cpu_ptr(hv_context.cpu_context, cpu);

		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx())
			hv_free_page(&hv_cpu->post_msg_page,
				     encrypt, "post msg");
		if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
			hv_free_page(&hv_cpu->hyp_synic_event_page,
				     encrypt, "hypervisor SynIC event");
			hv_free_page(&hv_cpu->hyp_synic_message_page,
				     encrypt, "hypervisor SynIC msg");
		}
		if (vmbus_is_confidential()) {
			hv_free_page(&hv_cpu->para_synic_event_page,
				     false, "paravisor SynIC event");
			hv_free_page(&hv_cpu->para_synic_message_page,
				     false, "paravisor SynIC msg");
		}
	}

	kfree(hv_context.hv_numa_map);
}

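/*
 * Per the Hyper-V TLFS, the SIMP and SIEFP synthetic MSRs read/written below
 * use bit 0 as the enable bit and bits 63:12 as the guest page number of the
 * SynIC message/event-flags page; the hv_synic_simp and hv_synic_siefp unions
 * mirror that layout.
 */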
/*
 * hv_hyp_synic_enable_regs - Initialize the Synthetic Interrupt Controller
 * with the hypervisor.
 */
void hv_hyp_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu =
		per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;

	/* Setup the Synic's message page with the hypervisor. */
	simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
	simp.simp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->hyp_synic_message_page =
			(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->hyp_synic_message_page)
			pr_err("Fail to map synic message page.\n");
	} else {
		simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_msr(HV_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page with the hypervisor. */
	siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->hyp_synic_event_page =
			(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->hyp_synic_event_page)
			pr_err("Fail to map synic event page.\n");
	} else {
		siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
	hv_enable_coco_interrupt(cpu, vmbus_interrupt, true);

	/* Setup the shared SINT. */
	if (vmbus_irq != -1)
		enable_percpu_irq(vmbus_irq, 0);
	shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);

	shared_sint.vector = vmbus_interrupt;
	shared_sint.masked = false;
	shared_sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
}

static void hv_hyp_synic_enable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Enable the global synic bit */
	sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
	sctrl.enable = 1;

	hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}

static void hv_para_synic_enable_regs(unsigned int cpu)
{
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);

	/* Setup the Synic's message page with the paravisor. */
	simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->para_synic_message_page)
		>> HV_HYP_PAGE_SHIFT;
	hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page with the paravisor. */
	siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->para_synic_event_page)
		>> HV_HYP_PAGE_SHIFT;
	hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
}

static void hv_para_synic_enable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Enable the global synic bit */
	sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
	sctrl.enable = 1;
	hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
}

int hv_synic_init(unsigned int cpu)
{
	if (vmbus_is_confidential())
		hv_para_synic_enable_regs(cpu);

	/*
	 * The SINT is set in hv_hyp_synic_enable_regs() by calling
	 * hv_set_msr(). hv_set_msr() in turn has special case code for the
	 * SINT MSRs that write to the hypervisor version of the MSR *and*
	 * the paravisor version of the MSR (but *without* the proxy bit when
	 * VMBus is confidential).
	 *
	 * Then enable interrupts via the paravisor if VMBus is confidential,
	 * and otherwise via the hypervisor.
	 */

	hv_hyp_synic_enable_regs(cpu);
	if (vmbus_is_confidential())
		hv_para_synic_enable_interrupts();
	else
		hv_hyp_synic_enable_interrupts();

	hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

	return 0;
}

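/*
 * hv_synic_init() and hv_synic_cleanup() are typically registered as CPU
 * hotplug (cpuhp) online/offline callbacks by the VMBus driver, so the
 * disable paths below mirror the enable paths above on a per-CPU basis.
 */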
void hv_hyp_synic_disable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu =
		per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;

	shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
	hv_enable_coco_interrupt(cpu, vmbus_interrupt, false);

	simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
	/*
	 * In Isolation VM, simp and sief pages are allocated by
	 * paravisor. These pages also will be used by kdump
	 * kernel. So just reset enable bit here and keep page
	 * addresses.
	 */
	simp.simp_enabled = 0;
	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		if (hv_cpu->hyp_synic_message_page) {
			iounmap(hv_cpu->hyp_synic_message_page);
			hv_cpu->hyp_synic_message_page = NULL;
		}
	} else {
		simp.base_simp_gpa = 0;
	}

	hv_set_msr(HV_MSR_SIMP, simp.as_uint64);

	siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = 0;

	if (ms_hyperv.paravisor_present || hv_root_partition()) {
		if (hv_cpu->hyp_synic_event_page) {
			iounmap(hv_cpu->hyp_synic_event_page);
			hv_cpu->hyp_synic_event_page = NULL;
		}
	} else {
		siefp.base_siefp_gpa = 0;
	}

	hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
}

static void hv_hyp_synic_disable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Disable the global synic bit */
	sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
	sctrl.enable = 0;
	hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}

static void hv_para_synic_disable_regs(unsigned int cpu)
{
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;

	/* Disable SynIC's message page in the paravisor. */
	simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
	simp.simp_enabled = 0;
	hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);

	/* Disable SynIC's event page in the paravisor. */
	siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
	siefp.siefp_enabled = 0;
	hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
}

static void hv_para_synic_disable_interrupts(void)
{
	union hv_synic_scontrol sctrl;

	/* Disable the global synic bit */
	sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
	sctrl.enable = 0;
	hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
}

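/*
 * The SIEF page holds one hv_synic_event_flags entry per SINT; each entry is
 * a 2048-bit (HV_EVENT_FLAGS_COUNT) bitmap indexed by channel relid. The
 * helpers below scan the VMBUS_MESSAGE_SINT entry for any relid with a
 * pending event.
 */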
#define HV_MAX_TRIES 3
/*
 * Scan the event flags page of 'this' CPU looking for any bit that is set. If we find one
 * bit set, then wait for a few milliseconds. Repeat these steps for a maximum of 3 times.
 * Return 'true', if there is still any set bit after this operation; 'false', otherwise.
 *
 * If a bit is set, that means there is a pending channel interrupt. The expectation is
 * that the normal interrupt handling mechanism will find and process the channel interrupt
 * "very soon", and in the process clear the bit.
 */
static bool __hv_synic_event_pending(union hv_synic_event_flags *event, int sint)
{
	unsigned long *recv_int_page;
	bool pending;
	u32 relid;
	int tries = 0;

	if (!event)
		return false;

	event += sint;
	recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
retry:
	pending = false;
	for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
		/* Special case - VMBus channel protocol messages */
		if (relid == 0)
			continue;
		pending = true;
		break;
	}
	if (pending && tries++ < HV_MAX_TRIES) {
		usleep_range(10000, 20000);
		goto retry;
	}
	return pending;
}

static bool hv_synic_event_pending(void)
{
	struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
	union hv_synic_event_flags *hyp_synic_event_page = hv_cpu->hyp_synic_event_page;
	union hv_synic_event_flags *para_synic_event_page = hv_cpu->para_synic_event_page;

	return
		__hv_synic_event_pending(hyp_synic_event_page, VMBUS_MESSAGE_SINT) ||
		__hv_synic_event_pending(para_synic_event_page, VMBUS_MESSAGE_SINT);
}

static int hv_pick_new_cpu(struct vmbus_channel *channel)
{
	int ret = -EBUSY;
	int start;
	int cpu;

	lockdep_assert_cpus_held();
	lockdep_assert_held(&vmbus_connection.channel_mutex);

	/*
	 * We can't assume that the relevant interrupts will be sent before
	 * the cpu is offlined on older versions of hyperv.
	 */
	if (vmbus_proto_version < VERSION_WIN10_V5_3)
		return -EBUSY;

	start = get_random_u32_below(nr_cpu_ids);

	for_each_cpu_wrap(cpu, cpu_online_mask, start) {
		if (channel->target_cpu == cpu ||
		    channel->target_cpu == VMBUS_CONNECT_CPU)
			continue;

		ret = vmbus_channel_set_cpu(channel, cpu);
		if (!ret)
			break;
	}

	if (ret)
		ret = vmbus_channel_set_cpu(channel, VMBUS_CONNECT_CPU);

	return ret;
}

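/*
 * CPU offline flow: hv_synic_cleanup() first tries to rebind every channel
 * targeting the outgoing CPU via hv_pick_new_cpu(), then gives pending event
 * flags a short grace period to drain, and only then tears down the per-CPU
 * SynIC state.
 */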
/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	int ret = 0;

	if (vmbus_connection.conn_state != CONNECTED)
		goto always_cleanup;

	/*
	 * Hyper-V does not provide a way to change the connect CPU once
	 * it is set; we must prevent the connect CPU from going offline
	 * while the VM is running normally. But in the panic or kexec()
	 * path where the vmbus is already disconnected, the CPU must be
	 * allowed to shut down.
	 */
	if (cpu == VMBUS_CONNECT_CPU)
		return -EBUSY;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * cleanup.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			ret = hv_pick_new_cpu(channel);
			if (ret) {
				mutex_unlock(&vmbus_connection.channel_mutex);
				return ret;
			}
		}
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				ret = hv_pick_new_cpu(sc);
				if (ret) {
					mutex_unlock(&vmbus_connection.channel_mutex);
					return ret;
				}
			}
		}
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Scan the event flags page looking for bits that are set and waiting
	 * with a timeout for vmbus_chan_sched() to process such bits. If bits
	 * are still set after this operation and VMBus is connected, fail the
	 * CPU offlining operation.
	 */
	if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
		return -EBUSY;

always_cleanup:
	hv_stimer_legacy_cleanup(cpu);

	/*
	 * First, disable the event and message pages
	 * used for communicating with the host, and then
	 * disable the host interrupts if VMBus is not
	 * confidential.
	 */
	hv_hyp_synic_disable_regs(cpu);
	if (!vmbus_is_confidential())
		hv_hyp_synic_disable_interrupts();

	/*
	 * Perform the same steps for the Confidential VMBus.
	 * The sequencing provides the guarantee that no data
	 * may be posted for processing before disabling interrupts.
	 */
	if (vmbus_is_confidential()) {
		hv_para_synic_disable_regs(cpu);
		hv_para_synic_disable_interrupts();
	}
	if (vmbus_irq != -1)
		disable_percpu_irq(vmbus_irq);

	return ret;
}