Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm: Rework I$/D$ coherency (v3)

This patch reworks the way we do I and D cache coherency on PowerPC.

The "old" way was split in 3 different parts depending on the processor type:

- Hash with per-page exec support (64-bit and >= POWER4 only) does it
at hashing time, by preventing exec on unclean pages and cleaning pages
on exec faults.

- Everything without per-page exec support (32-bit hash, 8xx, and
64-bit < POWER4) does it for all pages going to user space in update_mmu_cache().

- Embedded with per-page exec support does it from do_page_fault() on
exec faults, in a way similar to what the hash code does.

That leads to confusion, and bugs. For example, the method using update_mmu_cache()
is racy on SMP where another processor can see the new PTE and hash it in before
we have cleaned the cache, and then blow up trying to execute. This is hard to hit but
I think it has bitten us in the past.

Also, it's inefficient for embedded where we always end up having to do at least
one more page fault.

This reworks the whole thing by moving the cache sync into two main call sites,
though we keep different behaviours depending on the HW capability. The call
sites are set_pte_at() which is now made out of line, and ptep_set_access_flags()
which joins the former in pgtable.c

The base idea for Embedded with per-page exec support, is that we now do the
flush at set_pte_at() time when coming from an exec fault, which allows us
to avoid the double fault problem completely (we can even improve the situation
more by implementing TLB preload in update_mmu_cache() but that's for later).

If for some reason we didn't do it there and we try to execute, we'll hit
the page fault, which will do a minor fault, which will hit ptep_set_access_flags()
to do things like update _PAGE_ACCESSED or _PAGE_DIRTY if needed, we just make
this guy also perform the I/D cache sync for exec faults now. This second path
is the catch all for things that weren't cleaned at set_pte_at() time.

For CPUs without per-page exec support, we always do the sync at set_pte_at(),
thus guaranteeing that when the PTE is visible to other processors, the cache
is clean.

For the 64-bit hash with per-page exec support case, we keep the old mechanism
for now. I'll look into changing it later, once I've reworked a bit how we
use _PAGE_EXEC.

This is also a first step for adding _PAGE_EXEC support for embedded platforms

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+245 -136
+1 -1
arch/powerpc/include/asm/highmem.h
··· 99 99 #ifdef CONFIG_DEBUG_HIGHMEM 100 100 BUG_ON(!pte_none(*(kmap_pte-idx))); 101 101 #endif 102 - __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); 102 + __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); 103 103 local_flush_tlb_page(NULL, vaddr); 104 104 105 105 return (void*) vaddr;
+5 -51
arch/powerpc/include/asm/pgtable-ppc32.h
··· 429 429 #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() 430 430 #endif 431 431 432 + #define _PAGE_HPTEFLAGS _PAGE_HASHPTE 433 + 432 434 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 433 435 434 436 ··· 669 667 #endif /* CONFIG_PTE_64BIT */ 670 668 671 669 /* 672 - * set_pte stores a linux PTE into the linux page table. 673 - * On machines which use an MMU hash table we avoid changing the 674 - * _PAGE_HASHPTE bit. 675 - */ 676 - 677 - static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, 678 - pte_t *ptep, pte_t pte) 679 - { 680 - #if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) 681 - pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE); 682 - #elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) 683 - #if _PAGE_HASHPTE != 0 684 - if (pte_val(*ptep) & _PAGE_HASHPTE) 685 - flush_hash_entry(mm, ptep, addr); 686 - #endif 687 - __asm__ __volatile__("\ 688 - stw%U0%X0 %2,%0\n\ 689 - eieio\n\ 690 - stw%U0%X0 %L2,%1" 691 - : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) 692 - : "r" (pte) : "memory"); 693 - #else 694 - *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) 695 - | (pte_val(pte) & ~_PAGE_HASHPTE)); 696 - #endif 697 - } 698 - 699 - 700 - static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 701 - pte_t *ptep, pte_t pte) 702 - { 703 - #if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM) 704 - WARN_ON(pte_present(*ptep)); 705 - #endif 706 - __set_pte_at(mm, addr, ptep, pte); 707 - } 708 - 709 - /* 710 670 * 2.6 calls this without flushing the TLB entry; this is wrong 711 671 * for our hash-based implementation, we fix that up here. 
712 672 */ ··· 708 744 } 709 745 710 746 711 - #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 712 - static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) 747 + static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 713 748 { 714 749 unsigned long bits = pte_val(entry) & 715 - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW); 750 + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | 751 + _PAGE_HWEXEC | _PAGE_EXEC); 716 752 pte_update(ptep, 0, bits); 717 753 } 718 - 719 - #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 720 - ({ \ 721 - int __changed = !pte_same(*(__ptep), __entry); \ 722 - if (__changed) { \ 723 - __ptep_set_access_flags(__ptep, __entry, __dirty); \ 724 - flush_tlb_page_nohash(__vma, __address); \ 725 - } \ 726 - __changed; \ 727 - }) 728 754 729 755 #define __HAVE_ARCH_PTE_SAME 730 756 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+7 -22
arch/powerpc/include/asm/pgtable-ppc64.h
··· 125 125 #define _PTEIDX_SECONDARY 0x8 126 126 #define _PTEIDX_GROUP_IX 0x7 127 127 128 + /* To make some generic powerpc code happy */ 129 + #define _PAGE_HWEXEC 0 128 130 129 131 /* 130 132 * POWER4 and newer have per page execute protection, older chips can only ··· 287 285 : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) 288 286 : "cc" ); 289 287 288 + /* huge pages use the old page table lock */ 289 + if (!huge) 290 + assert_pte_locked(mm, addr); 291 + 290 292 if (old & _PAGE_HASHPTE) 291 293 hpte_need_flush(mm, addr, ptep, old, huge); 292 294 return old; ··· 365 359 pte_update(mm, addr, ptep, ~0UL, 0); 366 360 } 367 361 368 - /* 369 - * set_pte stores a linux PTE into the linux page table. 370 - */ 371 - static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 372 - pte_t *ptep, pte_t pte) 373 - { 374 - if (pte_present(*ptep)) 375 - pte_clear(mm, addr, ptep); 376 - pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); 377 - *ptep = pte; 378 - } 379 362 380 363 /* Set the dirty and/or accessed bits atomically in a linux PTE, this 381 364 * function doesn't need to flush the hash entry 382 365 */ 383 - #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 384 - static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) 366 + static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 385 367 { 386 368 unsigned long bits = pte_val(entry) & 387 369 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); ··· 386 392 :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) 387 393 :"cc"); 388 394 } 389 - #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 390 - ({ \ 391 - int __changed = !pte_same(*(__ptep), __entry); \ 392 - if (__changed) { \ 393 - __ptep_set_access_flags(__ptep, __entry, __dirty); \ 394 - flush_tlb_page_nohash(__vma, __address); \ 395 - } \ 396 - __changed; \ 397 - }) 398 395 399 396 #define __HAVE_ARCH_PTE_SAME 400 397 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) 
== 0)
+84
arch/powerpc/include/asm/pgtable.h
··· 6 6 #include <asm/processor.h> /* For TASK_SIZE */ 7 7 #include <asm/mmu.h> 8 8 #include <asm/page.h> 9 + 9 10 struct mm_struct; 11 + 12 + #ifdef CONFIG_DEBUG_VM 13 + extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr); 14 + #else /* CONFIG_DEBUG_VM */ 15 + static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr) 16 + { 17 + } 18 + #endif /* !CONFIG_DEBUG_VM */ 19 + 10 20 #endif /* !__ASSEMBLY__ */ 11 21 12 22 #if defined(CONFIG_PPC64) ··· 26 16 #endif 27 17 28 18 #ifndef __ASSEMBLY__ 19 + 20 + /* Insert a PTE, top-level function is out of line. It uses an inline 21 + * low level function in the respective pgtable-* files 22 + */ 23 + extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 24 + pte_t pte); 25 + 26 + /* This low level function performs the actual PTE insertion 27 + * Setting the PTE depends on the MMU type and other factors. It's 28 + * an horrible mess that I'm not going to try to clean up now but 29 + * I'm keeping it in one place rather than spread around 30 + */ 31 + static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, 32 + pte_t *ptep, pte_t pte, int percpu) 33 + { 34 + #if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) 35 + /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the 36 + * helper pte_update() which does an atomic update. We need to do that 37 + * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a 38 + * per-CPU PTE such as a kmap_atomic, we do a simple update preserving 39 + * the hash bits instead (ie, same as the non-SMP case) 40 + */ 41 + if (percpu) 42 + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) 43 + | (pte_val(pte) & ~_PAGE_HASHPTE)); 44 + else 45 + pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); 46 + 47 + #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) 48 + /* Second case is 32-bit with 64-bit PTE in SMP mode. 
In this case, we 49 + * can just store as long as we do the two halves in the right order 50 + * with a barrier in between. This is possible because we take care, 51 + * in the hash code, to pre-invalidate if the PTE was already hashed, 52 + * which synchronizes us with any concurrent invalidation. 53 + * In the percpu case, we also fallback to the simple update preserving 54 + * the hash bits 55 + */ 56 + if (percpu) { 57 + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) 58 + | (pte_val(pte) & ~_PAGE_HASHPTE)); 59 + return; 60 + } 61 + #if _PAGE_HASHPTE != 0 62 + if (pte_val(*ptep) & _PAGE_HASHPTE) 63 + flush_hash_entry(mm, ptep, addr); 64 + #endif 65 + __asm__ __volatile__("\ 66 + stw%U0%X0 %2,%0\n\ 67 + eieio\n\ 68 + stw%U0%X0 %L2,%1" 69 + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) 70 + : "r" (pte) : "memory"); 71 + 72 + #elif defined(CONFIG_PPC_STD_MMU_32) 73 + /* Third case is 32-bit hash table in UP mode, we need to preserve 74 + * the _PAGE_HASHPTE bit since we may not have invalidated the previous 75 + * translation in the hash yet (done in a subsequent flush_tlb_xxx()) 76 + * and see we need to keep track that this PTE needs invalidating 77 + */ 78 + *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) 79 + | (pte_val(pte) & ~_PAGE_HASHPTE)); 80 + 81 + #else 82 + /* Anything else just stores the PTE normally. That covers all 64-bit 83 + * cases, and 32-bit non-hash with 64-bit PTEs in UP mode 84 + */ 85 + *ptep = pte; 86 + #endif 87 + } 88 + 89 + 90 + #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 91 + extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, 92 + pte_t *ptep, pte_t entry, int dirty); 29 93 30 94 /* 31 95 * Macro to mark a page protection value as "uncacheable".
+17 -29
arch/powerpc/mm/fault.c
··· 253 253 #endif /* CONFIG_8xx */ 254 254 255 255 if (is_exec) { 256 - #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) 257 - /* protection fault */ 256 + #ifdef CONFIG_PPC_STD_MMU 257 + /* Protection fault on exec go straight to failure on 258 + * Hash based MMUs as they either don't support per-page 259 + * execute permission, or if they do, it's handled already 260 + * at the hash level. This test would probably have to 261 + * be removed if we change the way this works to make hash 262 + * processors use the same I/D cache coherency mechanism 263 + * as embedded. 264 + */ 258 265 if (error_code & DSISR_PROTFAULT) 259 266 goto bad_area; 267 + #endif /* CONFIG_PPC_STD_MMU */ 268 + 260 269 /* 261 270 * Allow execution from readable areas if the MMU does not 262 271 * provide separate controls over reading and executing. 272 + * 273 + * Note: That code used to not be enabled for 4xx/BookE. 274 + * It is now as I/D cache coherency for these is done at 275 + * set_pte_at() time and I see no reason why the test 276 + * below wouldn't be valid on those processors. This -may- 277 + * break programs compiled with a really old ABI though. 263 278 */ 264 279 if (!(vma->vm_flags & VM_EXEC) && 265 280 (cpu_has_feature(CPU_FTR_NOEXECUTE) || 266 281 !(vma->vm_flags & (VM_READ | VM_WRITE)))) 267 282 goto bad_area; 268 - #else 269 - pte_t *ptep; 270 - pmd_t *pmdp; 271 - 272 - /* Since 4xx/Book-E supports per-page execute permission, 273 - * we lazily flush dcache to icache. 
*/ 274 - ptep = NULL; 275 - if (get_pteptr(mm, address, &ptep, &pmdp)) { 276 - spinlock_t *ptl = pte_lockptr(mm, pmdp); 277 - spin_lock(ptl); 278 - if (pte_present(*ptep)) { 279 - struct page *page = pte_page(*ptep); 280 - 281 - if (!test_bit(PG_arch_1, &page->flags)) { 282 - flush_dcache_icache_page(page); 283 - set_bit(PG_arch_1, &page->flags); 284 - } 285 - pte_update(ptep, 0, _PAGE_HWEXEC | 286 - _PAGE_ACCESSED); 287 - local_flush_tlb_page(vma, address); 288 - pte_unmap_unlock(ptep, ptl); 289 - up_read(&mm->mmap_sem); 290 - return 0; 291 - } 292 - pte_unmap_unlock(ptep, ptl); 293 - } 294 - #endif 295 283 /* a write */ 296 284 } else if (is_write) { 297 285 if (!(vma->vm_flags & VM_WRITE))
-33
arch/powerpc/mm/mem.c
··· 472 472 { 473 473 #ifdef CONFIG_PPC_STD_MMU 474 474 unsigned long access = 0, trap; 475 - #endif 476 - unsigned long pfn = pte_pfn(pte); 477 475 478 - /* handle i-cache coherency */ 479 - if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && 480 - !cpu_has_feature(CPU_FTR_NOEXECUTE) && 481 - pfn_valid(pfn)) { 482 - struct page *page = pfn_to_page(pfn); 483 - #ifdef CONFIG_8xx 484 - /* On 8xx, cache control instructions (particularly 485 - * "dcbst" from flush_dcache_icache) fault as write 486 - * operation if there is an unpopulated TLB entry 487 - * for the address in question. To workaround that, 488 - * we invalidate the TLB here, thus avoiding dcbst 489 - * misbehaviour. 490 - */ 491 - _tlbil_va(address, 0 /* 8xx doesn't care about PID */); 492 - #endif 493 - /* The _PAGE_USER test should really be _PAGE_EXEC, but 494 - * older glibc versions execute some code from no-exec 495 - * pages, which for now we are supporting. If exec-only 496 - * pages are ever implemented, this will have to change. 497 - */ 498 - if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER) 499 - && !test_bit(PG_arch_1, &page->flags)) { 500 - if (vma->vm_mm == current->active_mm) { 501 - __flush_dcache_icache((void *) address); 502 - } else 503 - flush_dcache_icache_page(page); 504 - set_bit(PG_arch_1, &page->flags); 505 - } 506 - } 507 - 508 - #ifdef CONFIG_PPC_STD_MMU 509 476 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ 510 477 if (!pte_young(pte) || address >= TASK_SIZE) 511 478 return;
+131
arch/powerpc/mm/pgtable.c
··· 1 1 /* 2 2 * This file contains common routines for dealing with free of page tables 3 + * Along with common page table handling code 3 4 * 4 5 * Derived from arch/powerpc/mm/tlb_64.c: 5 6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) ··· 116 115 pte_free_submit(*batchp); 117 116 *batchp = NULL; 118 117 } 118 + 119 + /* 120 + * Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags() 121 + */ 122 + static pte_t do_dcache_icache_coherency(pte_t pte) 123 + { 124 + unsigned long pfn = pte_pfn(pte); 125 + struct page *page; 126 + 127 + if (unlikely(!pfn_valid(pfn))) 128 + return pte; 129 + page = pfn_to_page(pfn); 130 + 131 + if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) { 132 + pr_debug("do_dcache_icache_coherency... flushing\n"); 133 + flush_dcache_icache_page(page); 134 + set_bit(PG_arch_1, &page->flags); 135 + } 136 + else 137 + pr_debug("do_dcache_icache_coherency... already clean\n"); 138 + return __pte(pte_val(pte) | _PAGE_HWEXEC); 139 + } 140 + 141 + static inline int is_exec_fault(void) 142 + { 143 + return current->thread.regs && TRAP(current->thread.regs) == 0x400; 144 + } 145 + 146 + /* We only try to do i/d cache coherency on stuff that looks like 147 + * reasonably "normal" PTEs. We currently require a PTE to be present 148 + * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE 149 + */ 150 + static inline int pte_looks_normal(pte_t pte) 151 + { 152 + return (pte_val(pte) & 153 + (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) == 154 + (_PAGE_PRESENT); 155 + } 156 + 157 + #if defined(CONFIG_PPC_STD_MMU) 158 + /* Server-style MMU handles coherency when hashing if HW exec permission 159 + * is supposed per page (currently 64-bit only). Else, we always flush 160 + * valid PTEs in set_pte. 
161 + */ 162 + static inline int pte_need_exec_flush(pte_t pte, int set_pte) 163 + { 164 + return set_pte && pte_looks_normal(pte) && 165 + !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || 166 + cpu_has_feature(CPU_FTR_NOEXECUTE)); 167 + } 168 + #elif _PAGE_HWEXEC == 0 169 + /* Embedded type MMU without HW exec support (8xx only so far), we flush 170 + * the cache for any present PTE 171 + */ 172 + static inline int pte_need_exec_flush(pte_t pte, int set_pte) 173 + { 174 + return set_pte && pte_looks_normal(pte); 175 + } 176 + #else 177 + /* Other embedded CPUs with HW exec support per-page, we flush on exec 178 + * fault if HWEXEC is not set 179 + */ 180 + static inline int pte_need_exec_flush(pte_t pte, int set_pte) 181 + { 182 + return pte_looks_normal(pte) && is_exec_fault() && 183 + !(pte_val(pte) & _PAGE_HWEXEC); 184 + } 185 + #endif 186 + 187 + /* 188 + * set_pte stores a linux PTE into the linux page table. 189 + */ 190 + void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) 191 + { 192 + #ifdef CONFIG_DEBUG_VM 193 + WARN_ON(pte_present(*ptep)); 194 + #endif 195 + /* Note: mm->context.id might not yet have been assigned as 196 + * this context might not have been activated yet when this 197 + * is called. 198 + */ 199 + pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); 200 + if (pte_need_exec_flush(pte, 1)) 201 + pte = do_dcache_icache_coherency(pte); 202 + 203 + /* Perform the setting of the PTE */ 204 + __set_pte_at(mm, addr, ptep, pte, 0); 205 + } 206 + 207 + /* 208 + * This is called when relaxing access to a PTE. It's also called in the page 209 + * fault path when we don't hit any of the major fault cases, ie, a minor 210 + * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... 
The generic code will have 211 + * handled those two for us, we additionally deal with missing execute 212 + * permission here on some processors 213 + */ 214 + int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, 215 + pte_t *ptep, pte_t entry, int dirty) 216 + { 217 + int changed; 218 + if (!dirty && pte_need_exec_flush(entry, 0)) 219 + entry = do_dcache_icache_coherency(entry); 220 + changed = !pte_same(*(ptep), entry); 221 + if (changed) { 222 + assert_pte_locked(vma->vm_mm, address); 223 + __ptep_set_access_flags(ptep, entry); 224 + flush_tlb_page_nohash(vma, address); 225 + } 226 + return changed; 227 + } 228 + 229 + #ifdef CONFIG_DEBUG_VM 230 + void assert_pte_locked(struct mm_struct *mm, unsigned long addr) 231 + { 232 + pgd_t *pgd; 233 + pud_t *pud; 234 + pmd_t *pmd; 235 + 236 + if (mm == &init_mm) 237 + return; 238 + pgd = mm->pgd + pgd_index(addr); 239 + BUG_ON(pgd_none(*pgd)); 240 + pud = pud_offset(pgd, addr); 241 + BUG_ON(pud_none(*pud)); 242 + pmd = pmd_offset(pud, addr); 243 + BUG_ON(!pmd_present(*pmd)); 244 + BUG_ON(!spin_is_locked(pte_lockptr(mm, pmd))); 245 + } 246 + #endif /* CONFIG_DEBUG_VM */ 247 +