Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"RISC-V:

- Fix compilation without RISCV_ISA_ZICBOM

- Fix kvm_riscv_vcpu_timer_pending() for Sstc

ARM:

- Fix a bug that prevented restoring an ITS containing mappings for a
very large and very sparse device topology

- Work around a relocation handling error when compiling the nVHE
object with profile optimisation

- Fix for stage-2 invalidation holding the VM MMU lock for too long
by limiting the walk to the largest block mapping size

- Enable stack protection and branch profiling for VHE

- Two selftest fixes

x86:

- add compat implementation for KVM_X86_SET_MSR_FILTER ioctl

selftests:

- synchronize includes between include/uapi and tools/include/uapi"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
tools: include: sync include/api/linux/kvm.h
KVM: x86: Add compat handler for KVM_X86_SET_MSR_FILTER
KVM: x86: Copy filter arg outside kvm_vm_ioctl_set_msr_filter()
kvm: Add support for arch compat vm ioctls
RISC-V: KVM: Fix kvm_riscv_vcpu_timer_pending() for Sstc
RISC-V: Fix compilation without RISCV_ISA_ZICBOM
KVM: arm64: vgic: Fix exit condition in scan_its_table()
KVM: arm64: nvhe: Fix build with profile optimization
KVM: selftests: Fix number of pages for memory slot in memslot_modification_stress_test
KVM: arm64: selftests: Fix multiple versions of GIC creation
KVM: arm64: Enable stack protection and branch profiling for VHE
KVM: arm64: Limit stage2_apply_range() batch size to largest block
KVM: arm64: Work out supported block level at compile time

Changed files (+185 -104):

arch/arm64/include/asm/kvm_pgtable.h (+13 -5)
···
 
 #define KVM_PGTABLE_MAX_LEVELS 4U
 
+/*
+ * The largest supported block sizes for KVM (no 52-bit PA support):
+ *  - 4K (level 1): 1GB
+ *  - 16K (level 2): 32MB
+ *  - 64K (level 2): 512MB
+ */
+#ifdef CONFIG_ARM64_4K_PAGES
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL 1U
+#else
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL 2U
+#endif
+
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
         u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
···
 
 static inline bool kvm_level_supports_block_mapping(u32 level)
 {
-        /*
-         * Reject invalid block mappings and don't bother with 4TB mappings for
-         * 52-bit PAs.
-         */
-        return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
+        return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
 }
 
 /**

arch/arm64/include/asm/stage2_pgtable.h (-20)
···
 #include <linux/pgtable.h>
 
 /*
- * PGDIR_SHIFT determines the size a top-level page table entry can map
- * and depends on the number of levels in the page table. Compute the
- * PGDIR_SHIFT for a given number of levels.
- */
-#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
-
-/*
  * The hardware supports concatenation of up to 16 tables at stage2 entry
  * level and we use the feature whenever possible, which means we resolve 4
  * additional bits of address at the entry level.
···
 #define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
 #define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
 
-/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */
-#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
-#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm))
-#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1)
-
 /*
  * kvm_mmmu_cache_min_pages() is the number of pages required to install
  * a stage-2 translation. We pre-allocate the entry level page table at
  * the VM creation.
  */
 #define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)
-
-static inline phys_addr_t
-stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
-        phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
-
-        return (boundary - 1 < end - 1) ? boundary : end;
-}
 
 #endif /* __ARM64_S2_PGTABLE_H_ */

arch/arm64/kvm/hyp/Makefile (+1 -4)
···
 
 incdir := $(srctree)/$(src)/include
 subdir-asflags-y := -I$(incdir)
-subdir-ccflags-y := -I$(incdir) \
-                    -fno-stack-protector \
-                    -DDISABLE_BRANCH_PROFILING \
-                    $(DISABLE_STACKLEAK_PLUGIN)
+subdir-ccflags-y := -I$(incdir)
 
 obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o

arch/arm64/kvm/hyp/nvhe/Makefile (+7)
···
 # will explode instantly (Words of Marc Zyngier). So introduce a generic flag
 # __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
+ccflags-y += -fno-stack-protector \
+             -DDISABLE_BRANCH_PROFILING \
+             $(DISABLE_STACKLEAK_PLUGIN)
 
 hostprogs := gen-hyprel
 HOST_EXTRACFLAGS += -I$(objtree)/include
···
 # Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
 # This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
 KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
+# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile'
+# when profile optimization is applied. gen-hyprel does not support SHT_REL and
+# causes a build failure. Remove profile optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))
 
 # KVM nVHE code is run at a different exception code with a different map, so
 # compiler instrumentation that inserts callbacks or checks into the code may

arch/arm64/kvm/mmu.c (+8 -1)
···
 
 static unsigned long io_map_base;
 
+static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+        phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
+        phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
+
+        return (boundary - 1 < end - 1) ? boundary : end;
+}
 
 /*
  * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
···
         if (!pgt)
                 return -EINVAL;
 
-        next = stage2_pgd_addr_end(kvm, addr, end);
+        next = stage2_range_addr_end(addr, end);
         ret = fn(pgt, addr, next - addr);
         if (ret)
                 break;

arch/arm64/kvm/vgic/vgic-its.c (+4 -1)
···
 
         memset(entry, 0, esz);
 
-        while (len > 0) {
+        while (true) {
                 int next_offset;
                 size_t byte_offset;
 
···
                         return next_offset;
 
                 byte_offset = next_offset * esz;
+                if (byte_offset >= len)
+                        break;
+
                 id += next_offset;
                 gpa += byte_offset;
                 len -= byte_offset;

arch/riscv/include/asm/cacheflush.h (-8)
···
 
 #endif /* CONFIG_SMP */
 
-/*
- * The T-Head CMO errata internally probe the CBOM block size, but otherwise
- * don't depend on Zicbom.
- */
 extern unsigned int riscv_cbom_block_size;
-#ifdef CONFIG_RISCV_ISA_ZICBOM
 void riscv_init_cbom_blocksize(void);
-#else
-static inline void riscv_init_cbom_blocksize(void) { }
-#endif
 
 #ifdef CONFIG_RISCV_DMA_NONCOHERENT
 void riscv_noncoherent_supported(void);

arch/riscv/include/asm/kvm_vcpu_timer.h (+1)
···
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
 bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
 

arch/riscv/kvm/vcpu.c (+3)
···
                                 clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
                 }
         }
+
+        /* Sync-up timer CSRs */
+        kvm_riscv_vcpu_timer_sync(vcpu);
 }
 
 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)

arch/riscv/kvm/vcpu_timer.c (+20 -7)
···
                 kvm_riscv_vcpu_timer_unblocking(vcpu);
 }
 
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
+{
+        struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+        if (!t->sstc_enabled)
+                return;
+
+#if defined(CONFIG_32BIT)
+        t->next_cycles = csr_read(CSR_VSTIMECMP);
+        t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+        t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+}
+
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
 {
         struct kvm_vcpu_timer *t = &vcpu->arch.timer;
···
         if (!t->sstc_enabled)
                 return;
 
-        t = &vcpu->arch.timer;
-#if defined(CONFIG_32BIT)
-        t->next_cycles = csr_read(CSR_VSTIMECMP);
-        t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
-#else
-        t->next_cycles = csr_read(CSR_VSTIMECMP);
-#endif
+        /*
+         * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
+         * upon every VM exit so no need to save here.
+         */
+
         /* timer should be enabled for the remaining operations */
         if (unlikely(!t->init_done))
                 return;

arch/riscv/mm/cacheflush.c (+38)
···
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/of.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_SMP
···
         flush_icache_all();
 }
 #endif /* CONFIG_MMU */
+
+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
+void riscv_init_cbom_blocksize(void)
+{
+        struct device_node *node;
+        unsigned long cbom_hartid;
+        u32 val, probed_block_size;
+        int ret;
+
+        probed_block_size = 0;
+        for_each_of_cpu_node(node) {
+                unsigned long hartid;
+
+                ret = riscv_of_processor_hartid(node, &hartid);
+                if (ret)
+                        continue;
+
+                /* set block-size for cbom extension if available */
+                ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
+                if (ret)
+                        continue;
+
+                if (!probed_block_size) {
+                        probed_block_size = val;
+                        cbom_hartid = hartid;
+                } else {
+                        if (probed_block_size != val)
+                                pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
+                                        cbom_hartid, hartid);
+                }
+        }
+
+        if (probed_block_size)
+                riscv_cbom_block_size = probed_block_size;
+}

arch/riscv/mm/dma-noncoherent.c (-41)
···
 #include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
 #include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <asm/cacheflush.h>
-
-unsigned int riscv_cbom_block_size;
-EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
 
 static bool noncoherent_supported;
 
···
 
         dev->dma_coherent = coherent;
 }
-
-#ifdef CONFIG_RISCV_ISA_ZICBOM
-void riscv_init_cbom_blocksize(void)
-{
-        struct device_node *node;
-        unsigned long cbom_hartid;
-        u32 val, probed_block_size;
-        int ret;
-
-        probed_block_size = 0;
-        for_each_of_cpu_node(node) {
-                unsigned long hartid;
-
-                ret = riscv_of_processor_hartid(node, &hartid);
-                if (ret)
-                        continue;
-
-                /* set block-size for cbom extension if available */
-                ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
-                if (ret)
-                        continue;
-
-                if (!probed_block_size) {
-                        probed_block_size = val;
-                        cbom_hartid = hartid;
-                } else {
-                        if (probed_block_size != val)
-                                pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
-                                        cbom_hartid, hartid);
-                }
-        }
-
-        if (probed_block_size)
-                riscv_cbom_block_size = probed_block_size;
-}
-#endif
 
 void riscv_noncoherent_supported(void)

arch/x86/kvm/x86.c (+73 -14)
···
         return 0;
 }
 
-static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
+                                       struct kvm_msr_filter *filter)
 {
-        struct kvm_msr_filter __user *user_msr_filter = argp;
         struct kvm_x86_msr_filter *new_filter, *old_filter;
-        struct kvm_msr_filter filter;
         bool default_allow;
         bool empty = true;
         int r = 0;
         u32 i;
 
-        if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
-                return -EFAULT;
-
-        if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+        if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
                 return -EINVAL;
 
-        for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
-                empty &= !filter.ranges[i].nmsrs;
+        for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
+                empty &= !filter->ranges[i].nmsrs;
 
-        default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
+        default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
         if (empty && !default_allow)
                 return -EINVAL;
···
         if (!new_filter)
                 return -ENOMEM;
 
-        for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
-                r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+        for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
+                r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
                 if (r) {
                         kvm_free_msr_filter(new_filter);
                         return r;
···
 
         return 0;
 }
+
+#ifdef CONFIG_KVM_COMPAT
+/* for KVM_X86_SET_MSR_FILTER */
+struct kvm_msr_filter_range_compat {
+        __u32 flags;
+        __u32 nmsrs;
+        __u32 base;
+        __u32 bitmap;
+};
+
+struct kvm_msr_filter_compat {
+        __u32 flags;
+        struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
+};
+
+#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
+
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+                              unsigned long arg)
+{
+        void __user *argp = (void __user *)arg;
+        struct kvm *kvm = filp->private_data;
+        long r = -ENOTTY;
+
+        switch (ioctl) {
+        case KVM_X86_SET_MSR_FILTER_COMPAT: {
+                struct kvm_msr_filter __user *user_msr_filter = argp;
+                struct kvm_msr_filter_compat filter_compat;
+                struct kvm_msr_filter filter;
+                int i;
+
+                if (copy_from_user(&filter_compat, user_msr_filter,
+                                   sizeof(filter_compat)))
+                        return -EFAULT;
+
+                filter.flags = filter_compat.flags;
+                for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+                        struct kvm_msr_filter_range_compat *cr;
+
+                        cr = &filter_compat.ranges[i];
+                        filter.ranges[i] = (struct kvm_msr_filter_range) {
+                                .flags = cr->flags,
+                                .nmsrs = cr->nmsrs,
+                                .base = cr->base,
+                                .bitmap = (__u8 *)(ulong)cr->bitmap,
+                        };
+                }
+
+                r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
+                break;
+        }
+        }
+
+        return r;
+}
+#endif
 
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
 static int kvm_arch_suspend_notifier(struct kvm *kvm)
···
         case KVM_SET_PMU_EVENT_FILTER:
                 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
                 break;
-        case KVM_X86_SET_MSR_FILTER:
-                r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
+        case KVM_X86_SET_MSR_FILTER: {
+                struct kvm_msr_filter __user *user_msr_filter = argp;
+                struct kvm_msr_filter filter;
+
+                if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+                        return -EFAULT;
+
+                r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
                 break;
+        }
         default:
                 r = -ENOTTY;

include/linux/kvm_host.h (+2)
···
                         struct kvm_enable_cap *cap);
 long kvm_arch_vm_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+                              unsigned long arg);
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

tools/include/uapi/linux/kvm.h (+1)
···
 #define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
 #define KVM_CAP_S390_ZPCI_OP 221
 #define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
 
 #ifdef KVM_CAP_IRQ_ROUTING
 

tools/testing/selftests/kvm/aarch64/vgic_init.c (+2 -2)
···
                         : KVM_DEV_TYPE_ARM_VGIC_V2;
 
         if (!__kvm_test_create_device(v.vm, other)) {
-                ret = __kvm_test_create_device(v.vm, other);
-                TEST_ASSERT(ret && (errno == EINVAL || errno == EEXIST),
+                ret = __kvm_create_device(v.vm, other);
+                TEST_ASSERT(ret < 0 && (errno == EINVAL || errno == EEXIST),
                             "create GIC device while other version exists");
         }
 

tools/testing/selftests/kvm/memslot_modification_stress_test.c (+1 -1)
···
 static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
                                uint64_t nr_modifications)
 {
-        const uint64_t pages = 1;
+        uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
         uint64_t gpa;
         int i;
 

virt/kvm/kvm_main.c (+11)
···
         };
 };
 
+long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+                                     unsigned long arg)
+{
+        return -ENOTTY;
+}
+
 static long kvm_vm_compat_ioctl(struct file *filp,
                            unsigned int ioctl, unsigned long arg)
 {
···
 
         if (kvm->mm != current->mm || kvm->vm_dead)
                 return -EIO;
+
+        r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
+        if (r != -ENOTTY)
+                return r;
+
         switch (ioctl) {
 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
         case KVM_CLEAR_DIRTY_LOG: {