Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] powerpc: Merge vdso's and add vdso support to 32 bits kernel

This patch moves the vdso's to arch/powerpc, adds support for the 32
bits vdso to the 32 bits kernel, renames systemcfg (finally !), and adds
some new (still untested) routines to both vdso's: clock_gettime() with
support for CLOCK_REALTIME and CLOCK_MONOTONIC, clock_getres() (same
clocks) and get_tbfreq() for glibc to retrieve the timebase frequency.

Tom, Steve: The implementation of get_tbfreq() I've done for 32 bits
returns a long long (r3, r4) not a long. This is such that if we ever
add support for >4Ghz timebases on ppc32, the userland interface won't
have to change.

I have tested gettimeofday() using some glibc patches in both ppc32 and
ppc64 kernels using 32 bits userland (I haven't had a chance to test a
64 bits userland yet, but the implementation didn't change and was
tested earlier). I haven't tested yet the new functions.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Benjamin Herrenschmidt and committed by
Paul Mackerras
a7f290da 6761c4a0

+1547 -436
+3 -1
arch/powerpc/kernel/Makefile
··· 12 12 endif 13 13 14 14 obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ 15 - irq.o signal_32.o pmc.o 15 + irq.o signal_32.o pmc.o vdso.o 16 + obj-y += vdso32/ 16 17 obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ 17 18 signal_64.o ptrace32.o systbl.o \ 18 19 paca.o ioctl32.o cpu_setup_power4.o \ 19 20 firmware.o sysfs.o udbg.o 21 + obj-$(CONFIG_PPC64) += vdso64/ 20 22 obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o 21 23 obj-$(CONFIG_POWER4) += idle_power4.o 22 24 obj-$(CONFIG_PPC_OF) += of_device.o
+31 -14
arch/powerpc/kernel/asm-offsets.c
··· 37 37 #include <asm/cputable.h> 38 38 #include <asm/thread_info.h> 39 39 #include <asm/rtas.h> 40 + #include <asm/vdso_datapage.h> 40 41 #ifdef CONFIG_PPC64 41 42 #include <asm/paca.h> 42 43 #include <asm/lppaca.h> 43 44 #include <asm/iseries/hv_lp_event.h> 44 45 #include <asm/cache.h> 45 - #include <asm/systemcfg.h> 46 46 #include <asm/compat.h> 47 47 #endif 48 48 ··· 251 251 252 252 DEFINE(TASK_SIZE, TASK_SIZE); 253 253 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); 254 - #else /* CONFIG_PPC64 */ 255 - /* systemcfg offsets for use by vdso */ 256 - DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp)); 257 - DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec)); 258 - DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs)); 259 - DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec)); 260 - DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count)); 261 - DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest)); 262 - DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime)); 263 - DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32)); 264 - DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64)); 254 + #endif /* ! 
CONFIG_PPC64 */ 265 255 266 - /* timeval/timezone offsets for use by vdso */ 256 + /* datapage offsets for use by vdso */ 257 + DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp)); 258 + DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec)); 259 + DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs)); 260 + DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec)); 261 + DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count)); 262 + DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest)); 263 + DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); 264 + DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32)); 265 + DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 266 + DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 267 + #ifdef CONFIG_PPC64 268 + DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64)); 267 269 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); 268 270 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); 269 271 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); 270 272 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); 273 + DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec)); 274 + DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec)); 275 + #else 276 + DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec)); 277 + DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec)); 278 + DEFINE(TSPEC32_TV_SEC, offsetof(struct timespec, tv_sec)); 279 + DEFINE(TSPEC32_TV_NSEC, offsetof(struct timespec, tv_nsec)); 280 + #endif 281 + /* timeval/timezone offsets for use by vdso */ 271 282 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); 272 283 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); 273 - #endif /* CONFIG_PPC64 */ 284 + 285 + /* Other bits used by the vdso */ 286 + DEFINE(CLOCK_REALTIME, 
CLOCK_REALTIME); 287 + DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); 288 + DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); 289 + DEFINE(CLOCK_REALTIME_RES, TICK_NSEC); 290 + 274 291 return 0; 275 292 }
-7
arch/powerpc/kernel/paca.c
··· 15 15 #include <asm/processor.h> 16 16 #include <asm/ptrace.h> 17 17 #include <asm/page.h> 18 - #include <asm/systemcfg.h> 19 18 #include <asm/lppaca.h> 20 19 #include <asm/iseries/it_lp_queue.h> 21 20 #include <asm/paca.h> 22 - 23 - static union { 24 - struct systemcfg data; 25 - u8 page[PAGE_SIZE]; 26 - } systemcfg_store __attribute__((__section__(".data.page.aligned"))); 27 - struct systemcfg *_systemcfg = &systemcfg_store.data; 28 21 29 22 30 23 /* This symbol is provided by the linker - let it fill in the paca
+2 -2
arch/powerpc/kernel/proc_ppc64.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/kernel.h> 25 25 26 - #include <asm/systemcfg.h> 26 + #include <asm/vdso_datapage.h> 27 27 #include <asm/rtas.h> 28 28 #include <asm/uaccess.h> 29 29 #include <asm/prom.h> ··· 72 72 if (!pde) 73 73 return 1; 74 74 pde->nlink = 1; 75 - pde->data = _systemcfg; 75 + pde->data = vdso_data; 76 76 pde->size = PAGE_SIZE; 77 77 pde->proc_fops = &page_map_fops; 78 78
-1
arch/powerpc/kernel/rtas-proc.c
··· 32 32 #include <asm/rtas.h> 33 33 #include <asm/machdep.h> /* for ppc_md */ 34 34 #include <asm/time.h> 35 - #include <asm/systemcfg.h> 36 35 37 36 /* Token for Sensors */ 38 37 #define KEY_SWITCH 0x0001
+2 -2
arch/powerpc/kernel/setup-common.c
··· 33 33 #include <asm/io.h> 34 34 #include <asm/prom.h> 35 35 #include <asm/processor.h> 36 - #include <asm/systemcfg.h> 36 + #include <asm/vdso_datapage.h> 37 37 #include <asm/pgtable.h> 38 38 #include <asm/smp.h> 39 39 #include <asm/elf.h> ··· 564 564 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); 565 565 } 566 566 567 - _systemcfg->processorCount = num_present_cpus(); 567 + vdso_data->processorCount = num_present_cpus(); 568 568 #endif /* CONFIG_PPC64 */ 569 569 } 570 570 #endif /* CONFIG_SMP */
+10 -52
arch/powerpc/kernel/setup_64.c
··· 57 57 #include <asm/lmb.h> 58 58 #include <asm/iseries/it_lp_naca.h> 59 59 #include <asm/firmware.h> 60 - #include <asm/systemcfg.h> 61 60 #include <asm/xmon.h> 62 61 #include <asm/udbg.h> 63 62 ··· 374 375 DBG("Argh, can't find dcache properties ! " 375 376 "sizep: %p, lsizep: %p\n", sizep, lsizep); 376 377 377 - _systemcfg->dcache_size = ppc64_caches.dsize = size; 378 - _systemcfg->dcache_line_size = 379 - ppc64_caches.dline_size = lsize; 378 + ppc64_caches.dsize = size; 379 + ppc64_caches.dline_size = lsize; 380 380 ppc64_caches.log_dline_size = __ilog2(lsize); 381 381 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize; 382 382 ··· 391 393 DBG("Argh, can't find icache properties ! " 392 394 "sizep: %p, lsizep: %p\n", sizep, lsizep); 393 395 394 - _systemcfg->icache_size = ppc64_caches.isize = size; 395 - _systemcfg->icache_line_size = 396 - ppc64_caches.iline_size = lsize; 396 + ppc64_caches.isize = size; 397 + ppc64_caches.iline_size = lsize; 397 398 ppc64_caches.log_iline_size = __ilog2(lsize); 398 399 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; 399 400 } 400 401 } 401 - 402 - /* Add an eye catcher and the systemcfg layout version number */ 403 - strcpy(_systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); 404 - _systemcfg->version.major = SYSTEMCFG_MAJOR; 405 - _systemcfg->version.minor = SYSTEMCFG_MINOR; 406 - _systemcfg->processor = mfspr(SPRN_PVR); 407 - _systemcfg->platform = _machine; 408 - _systemcfg->physicalMemorySize = lmb_phys_mem_size(); 409 402 410 403 DBG(" <- initialize_cache_info()\n"); 411 404 } ··· 484 495 485 496 printk("-----------------------------------------------------\n"); 486 497 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 487 - printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller); 488 - printk("systemcfg = 0x%p\n", _systemcfg); 489 - printk("systemcfg->platform = 0x%x\n", _systemcfg->platform); 490 - printk("systemcfg->processorCount = 0x%lx\n", _systemcfg->processorCount); 491 - 
printk("systemcfg->physicalMemorySize = 0x%lx\n", _systemcfg->physicalMemorySize); 498 + printk("ppc64_interrupt_controller = 0x%ld\n", 499 + ppc64_interrupt_controller); 500 + printk("platform = 0x%x\n", _machine); 501 + printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size()); 492 502 printk("ppc64_caches.dcache_line_size = 0x%x\n", 493 - ppc64_caches.dline_size); 503 + ppc64_caches.dline_size); 494 504 printk("ppc64_caches.icache_line_size = 0x%x\n", 495 - ppc64_caches.iline_size); 505 + ppc64_caches.iline_size); 496 506 printk("htab_address = 0x%p\n", htab_address); 497 507 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); 498 508 printk("-----------------------------------------------------\n"); ··· 556 568 } 557 569 558 570 /* 559 - * Called from setup_arch to initialize the bitmap of available 560 - * syscalls in the systemcfg page 561 - */ 562 - void __init setup_syscall_map(void) 563 - { 564 - unsigned int i, count64 = 0, count32 = 0; 565 - extern unsigned long *sys_call_table; 566 - extern unsigned long sys_ni_syscall; 567 - 568 - 569 - for (i = 0; i < __NR_syscalls; i++) { 570 - if (sys_call_table[i*2] != sys_ni_syscall) { 571 - count64++; 572 - _systemcfg->syscall_map_64[i >> 5] |= 573 - 0x80000000UL >> (i & 0x1f); 574 - } 575 - if (sys_call_table[i*2+1] != sys_ni_syscall) { 576 - count32++; 577 - _systemcfg->syscall_map_32[i >> 5] |= 578 - 0x80000000UL >> (i & 0x1f); 579 - } 580 - } 581 - printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n", 582 - count32, count64); 583 - } 584 - 585 - /* 586 571 * Called into from start_kernel, after lock_kernel has been called. 587 572 * Initializes bootmem, which is unsed to manage page allocation until 588 573 * mem_init is called. 
··· 595 634 /* set up the bootmem stuff with available memory */ 596 635 do_init_bootmem(); 597 636 sparse_init(); 598 - 599 - /* initialize the syscall map in systemcfg */ 600 - setup_syscall_map(); 601 637 602 638 #ifdef CONFIG_DUMMY_CONSOLE 603 639 conswitchp = &dummy_con;
+3 -9
arch/powerpc/kernel/signal_32.c
··· 43 43 #include <asm/uaccess.h> 44 44 #include <asm/cacheflush.h> 45 45 #include <asm/sigcontext.h> 46 + #include <asm/vdso.h> 46 47 #ifdef CONFIG_PPC64 47 48 #include "ppc32.h" 48 49 #include <asm/unistd.h> 49 - #include <asm/vdso.h> 50 50 #else 51 51 #include <asm/ucontext.h> 52 52 #include <asm/pgtable.h> ··· 809 809 810 810 /* Save user registers on the stack */ 811 811 frame = &rt_sf->uc.uc_mcontext; 812 - #ifdef CONFIG_PPC64 813 812 if (vdso32_rt_sigtramp && current->thread.vdso_base) { 814 813 if (save_user_regs(regs, frame, 0)) 815 814 goto badframe; 816 815 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp; 817 - } else 818 - #endif 819 - { 816 + } else { 820 817 if (save_user_regs(regs, frame, __NR_rt_sigreturn)) 821 818 goto badframe; 822 819 regs->link = (unsigned long) frame->tramp; ··· 1087 1090 || __put_user(sig, &sc->signal)) 1088 1091 goto badframe; 1089 1092 1090 - #ifdef CONFIG_PPC64 1091 1093 if (vdso32_sigtramp && current->thread.vdso_base) { 1092 1094 if (save_user_regs(regs, &frame->mctx, 0)) 1093 1095 goto badframe; 1094 1096 regs->link = current->thread.vdso_base + vdso32_sigtramp; 1095 - } else 1096 - #endif 1097 - { 1097 + } else { 1098 1098 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn)) 1099 1099 goto badframe; 1100 1100 regs->link = (unsigned long) frame->mctx.tramp;
+2 -2
arch/powerpc/kernel/smp.c
··· 44 44 #include <asm/cputable.h> 45 45 #include <asm/system.h> 46 46 #include <asm/mpic.h> 47 - #include <asm/systemcfg.h> 47 + #include <asm/vdso_datapage.h> 48 48 #ifdef CONFIG_PPC64 49 49 #include <asm/paca.h> 50 50 #endif ··· 371 371 372 372 cpu_clear(cpu, cpu_online_map); 373 373 #ifdef CONFIG_PPC64 374 - _systemcfg->processorCount--; 374 + vdso_data->processorCount--; 375 375 fixup_irqs(cpu_online_map); 376 376 #endif 377 377 return 0;
-1
arch/powerpc/kernel/sysfs.c
··· 16 16 #include <asm/firmware.h> 17 17 #include <asm/hvcall.h> 18 18 #include <asm/prom.h> 19 - #include <asm/systemcfg.h> 20 19 #include <asm/paca.h> 21 20 #include <asm/lppaca.h> 22 21 #include <asm/machdep.h>
+18 -22
arch/powerpc/kernel/time.c
··· 62 62 #include <asm/irq.h> 63 63 #include <asm/div64.h> 64 64 #include <asm/smp.h> 65 + #include <asm/vdso_datapage.h> 65 66 #ifdef CONFIG_PPC64 66 - #include <asm/systemcfg.h> 67 67 #include <asm/firmware.h> 68 68 #endif 69 69 #ifdef CONFIG_PPC_ISERIES ··· 261 261 do_gtod.varp = temp_varp; 262 262 do_gtod.var_idx = temp_idx; 263 263 264 - #ifdef CONFIG_PPC64 265 264 /* 266 265 * tb_update_count is used to allow the userspace gettimeofday code 267 266 * to assure itself that it sees a consistent view of the tb_to_xs and ··· 270 271 * tb_to_xs and stamp_xsec values are consistent. If not, then it 271 272 * loops back and reads them again until this criteria is met. 272 273 */ 273 - ++(_systemcfg->tb_update_count); 274 + ++(vdso_data->tb_update_count); 274 275 smp_wmb(); 275 - _systemcfg->tb_orig_stamp = new_tb_stamp; 276 - _systemcfg->stamp_xsec = new_stamp_xsec; 277 - _systemcfg->tb_to_xs = new_tb_to_xs; 276 + vdso_data->tb_orig_stamp = new_tb_stamp; 277 + vdso_data->stamp_xsec = new_stamp_xsec; 278 + vdso_data->tb_to_xs = new_tb_to_xs; 279 + vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; 280 + vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; 278 281 smp_wmb(); 279 - ++(_systemcfg->tb_update_count); 280 - #endif 282 + ++(vdso_data->tb_update_count); 281 283 } 282 284 283 285 /* ··· 357 357 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 358 358 tb_to_xs = divres.result_low; 359 359 do_gtod.varp->tb_to_xs = tb_to_xs; 360 - _systemcfg->tb_ticks_per_sec = 361 - tb_ticks_per_sec; 362 - _systemcfg->tb_to_xs = tb_to_xs; 360 + vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 361 + vdso_data->tb_to_xs = tb_to_xs; 363 362 } 364 363 else { 365 364 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n" ··· 560 561 new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs; 561 562 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); 562 563 563 - #ifdef CONFIG_PPC64 564 - _systemcfg->tz_minuteswest = sys_tz.tz_minuteswest; 565 - 
_systemcfg->tz_dsttime = sys_tz.tz_dsttime; 566 - #endif 564 + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; 565 + vdso_data->tz_dsttime = sys_tz.tz_dsttime; 567 566 568 567 write_sequnlock_irqrestore(&xtime_lock, flags); 569 568 clock_was_set(); ··· 710 713 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 711 714 do_gtod.varp->tb_to_xs = tb_to_xs; 712 715 do_gtod.tb_to_us = tb_to_us; 713 - #ifdef CONFIG_PPC64 714 - _systemcfg->tb_orig_stamp = tb_last_jiffy; 715 - _systemcfg->tb_update_count = 0; 716 - _systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 717 - _systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 718 - _systemcfg->tb_to_xs = tb_to_xs; 719 - #endif 716 + 717 + vdso_data->tb_orig_stamp = tb_last_jiffy; 718 + vdso_data->tb_update_count = 0; 719 + vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 720 + vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 721 + vdso_data->tb_to_xs = tb_to_xs; 720 722 721 723 time_freq = 0; 722 724
-1
arch/powerpc/kernel/traps.c
··· 49 49 #ifdef CONFIG_PPC64 50 50 #include <asm/firmware.h> 51 51 #include <asm/processor.h> 52 - #include <asm/systemcfg.h> 53 52 #endif 54 53 55 54 #ifdef CONFIG_PPC64 /* XXX */
+746
arch/powerpc/kernel/vdso.c
··· 1 + /* 2 + * linux/arch/ppc64/kernel/vdso.c 3 + * 4 + * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. 5 + * <benh@kernel.crashing.org> 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * as published by the Free Software Foundation; either version 10 + * 2 of the License, or (at your option) any later version. 11 + */ 12 + 13 + #include <linux/config.h> 14 + #include <linux/module.h> 15 + #include <linux/errno.h> 16 + #include <linux/sched.h> 17 + #include <linux/kernel.h> 18 + #include <linux/mm.h> 19 + #include <linux/smp.h> 20 + #include <linux/smp_lock.h> 21 + #include <linux/stddef.h> 22 + #include <linux/unistd.h> 23 + #include <linux/slab.h> 24 + #include <linux/user.h> 25 + #include <linux/elf.h> 26 + #include <linux/security.h> 27 + #include <linux/bootmem.h> 28 + 29 + #include <asm/pgtable.h> 30 + #include <asm/system.h> 31 + #include <asm/processor.h> 32 + #include <asm/mmu.h> 33 + #include <asm/mmu_context.h> 34 + #include <asm/lmb.h> 35 + #include <asm/machdep.h> 36 + #include <asm/cputable.h> 37 + #include <asm/sections.h> 38 + #include <asm/vdso.h> 39 + #include <asm/vdso_datapage.h> 40 + 41 + #undef DEBUG 42 + 43 + #ifdef DEBUG 44 + #define DBG(fmt...) printk(fmt) 45 + #else 46 + #define DBG(fmt...) 47 + #endif 48 + 49 + /* Max supported size for symbol names */ 50 + #define MAX_SYMNAME 64 51 + 52 + extern char vdso32_start, vdso32_end; 53 + static void *vdso32_kbase = &vdso32_start; 54 + unsigned int vdso32_pages; 55 + unsigned long vdso32_sigtramp; 56 + unsigned long vdso32_rt_sigtramp; 57 + 58 + #ifdef CONFIG_PPC64 59 + extern char vdso64_start, vdso64_end; 60 + static void *vdso64_kbase = &vdso64_start; 61 + unsigned int vdso64_pages; 62 + unsigned long vdso64_rt_sigtramp; 63 + #endif /* CONFIG_PPC64 */ 64 + 65 + /* 66 + * The vdso data page (aka. systemcfg for old ppc64 fans) is here. 
67 + * Once the early boot kernel code no longer needs to muck around 68 + * with it, it will become dynamically allocated 69 + */ 70 + static union { 71 + struct vdso_data data; 72 + u8 page[PAGE_SIZE]; 73 + } vdso_data_store __attribute__((__section__(".data.page_aligned"))); 74 + struct vdso_data *vdso_data = &vdso_data_store.data; 75 + 76 + /* Format of the patch table */ 77 + struct vdso_patch_def 78 + { 79 + unsigned long ftr_mask, ftr_value; 80 + const char *gen_name; 81 + const char *fix_name; 82 + }; 83 + 84 + /* Table of functions to patch based on the CPU type/revision 85 + * 86 + * Currently, we only change sync_dicache to do nothing on processors 87 + * with a coherent icache 88 + */ 89 + static struct vdso_patch_def vdso_patches[] = { 90 + { 91 + CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE, 92 + "__kernel_sync_dicache", "__kernel_sync_dicache_p5" 93 + }, 94 + { 95 + CPU_FTR_USE_TB, 0, 96 + "__kernel_gettimeofday", NULL 97 + }, 98 + }; 99 + 100 + /* 101 + * Some infos carried around for each of them during parsing at 102 + * boot time. 
103 + */ 104 + struct lib32_elfinfo 105 + { 106 + Elf32_Ehdr *hdr; /* ptr to ELF */ 107 + Elf32_Sym *dynsym; /* ptr to .dynsym section */ 108 + unsigned long dynsymsize; /* size of .dynsym section */ 109 + char *dynstr; /* ptr to .dynstr section */ 110 + unsigned long text; /* offset of .text section in .so */ 111 + }; 112 + 113 + struct lib64_elfinfo 114 + { 115 + Elf64_Ehdr *hdr; 116 + Elf64_Sym *dynsym; 117 + unsigned long dynsymsize; 118 + char *dynstr; 119 + unsigned long text; 120 + }; 121 + 122 + 123 + #ifdef __DEBUG 124 + static void dump_one_vdso_page(struct page *pg, struct page *upg) 125 + { 126 + printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT), 127 + page_count(pg), 128 + pg->flags); 129 + if (upg/* && pg != upg*/) { 130 + printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) 131 + << PAGE_SHIFT), 132 + page_count(upg), 133 + upg->flags); 134 + } 135 + printk("\n"); 136 + } 137 + 138 + static void dump_vdso_pages(struct vm_area_struct * vma) 139 + { 140 + int i; 141 + 142 + if (!vma || test_thread_flag(TIF_32BIT)) { 143 + printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); 144 + for (i=0; i<vdso32_pages; i++) { 145 + struct page *pg = virt_to_page(vdso32_kbase + 146 + i*PAGE_SIZE); 147 + struct page *upg = (vma && vma->vm_mm) ? 148 + follow_page(vma->vm_mm, vma->vm_start + 149 + i*PAGE_SIZE, 0) 150 + : NULL; 151 + dump_one_vdso_page(pg, upg); 152 + } 153 + } 154 + if (!vma || !test_thread_flag(TIF_32BIT)) { 155 + printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); 156 + for (i=0; i<vdso64_pages; i++) { 157 + struct page *pg = virt_to_page(vdso64_kbase + 158 + i*PAGE_SIZE); 159 + struct page *upg = (vma && vma->vm_mm) ? 160 + follow_page(vma->vm_mm, vma->vm_start + 161 + i*PAGE_SIZE, 0) 162 + : NULL; 163 + dump_one_vdso_page(pg, upg); 164 + } 165 + } 166 + } 167 + #endif /* DEBUG */ 168 + 169 + /* 170 + * Keep a dummy vma_close for now, it will prevent VMA merging. 
171 + */ 172 + static void vdso_vma_close(struct vm_area_struct * vma) 173 + { 174 + } 175 + 176 + /* 177 + * Our nopage() function, maps in the actual vDSO kernel pages, they will 178 + * be mapped read-only by do_no_page(), and eventually COW'ed, either 179 + * right away for an initial write access, or by do_wp_page(). 180 + */ 181 + static struct page * vdso_vma_nopage(struct vm_area_struct * vma, 182 + unsigned long address, int *type) 183 + { 184 + unsigned long offset = address - vma->vm_start; 185 + struct page *pg; 186 + #ifdef CONFIG_PPC64 187 + void *vbase = test_thread_flag(TIF_32BIT) ? 188 + vdso32_kbase : vdso64_kbase; 189 + #else 190 + void *vbase = vdso32_kbase; 191 + #endif 192 + 193 + DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n", 194 + current->comm, address, offset); 195 + 196 + if (address < vma->vm_start || address > vma->vm_end) 197 + return NOPAGE_SIGBUS; 198 + 199 + /* 200 + * Last page is systemcfg. 201 + */ 202 + if ((vma->vm_end - address) <= PAGE_SIZE) 203 + pg = virt_to_page(vdso_data); 204 + else 205 + pg = virt_to_page(vbase + offset); 206 + 207 + get_page(pg); 208 + DBG(" ->page count: %d\n", page_count(pg)); 209 + 210 + return pg; 211 + } 212 + 213 + static struct vm_operations_struct vdso_vmops = { 214 + .close = vdso_vma_close, 215 + .nopage = vdso_vma_nopage, 216 + }; 217 + 218 + /* 219 + * This is called from binfmt_elf, we create the special vma for the 220 + * vDSO and insert it into the mm struct tree 221 + */ 222 + int arch_setup_additional_pages(struct linux_binprm *bprm, 223 + int executable_stack) 224 + { 225 + struct mm_struct *mm = current->mm; 226 + struct vm_area_struct *vma; 227 + unsigned long vdso_pages; 228 + unsigned long vdso_base; 229 + 230 + #ifdef CONFIG_PPC64 231 + if (test_thread_flag(TIF_32BIT)) { 232 + vdso_pages = vdso32_pages; 233 + vdso_base = VDSO32_MBASE; 234 + } else { 235 + vdso_pages = vdso64_pages; 236 + vdso_base = VDSO64_MBASE; 237 + } 238 + #else 239 + vdso_pages = 
vdso32_pages; 240 + vdso_base = VDSO32_MBASE; 241 + #endif 242 + 243 + current->thread.vdso_base = 0; 244 + 245 + /* vDSO has a problem and was disabled, just don't "enable" it for the 246 + * process 247 + */ 248 + if (vdso_pages == 0) 249 + return 0; 250 + 251 + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 252 + if (vma == NULL) 253 + return -ENOMEM; 254 + 255 + memset(vma, 0, sizeof(*vma)); 256 + 257 + /* Add a page to the vdso size for the data page */ 258 + vdso_pages ++; 259 + 260 + /* 261 + * pick a base address for the vDSO in process space. We try to put it 262 + * at vdso_base which is the "natural" base for it, but we might fail 263 + * and end up putting it elsewhere. 264 + */ 265 + vdso_base = get_unmapped_area(NULL, vdso_base, 266 + vdso_pages << PAGE_SHIFT, 0, 0); 267 + if (vdso_base & ~PAGE_MASK) { 268 + kmem_cache_free(vm_area_cachep, vma); 269 + return (int)vdso_base; 270 + } 271 + 272 + current->thread.vdso_base = vdso_base; 273 + 274 + vma->vm_mm = mm; 275 + vma->vm_start = current->thread.vdso_base; 276 + vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT); 277 + 278 + /* 279 + * our vma flags don't have VM_WRITE so by default, the process isn't 280 + * allowed to write those pages. 281 + * gdb can break that with ptrace interface, and thus trigger COW on 282 + * those pages but it's then your responsibility to never do that on 283 + * the "data" page of the vDSO or you'll stop getting kernel updates 284 + * and your nice userland gettimeofday will be totally dead. 
285 + * It's fine to use that for setting breakpoints in the vDSO code 286 + * pages though 287 + */ 288 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | 289 + VM_MAYEXEC | VM_RESERVED; 290 + vma->vm_flags |= mm->def_flags; 291 + vma->vm_page_prot = protection_map[vma->vm_flags & 0x7]; 292 + vma->vm_ops = &vdso_vmops; 293 + 294 + down_write(&mm->mmap_sem); 295 + if (insert_vm_struct(mm, vma)) { 296 + up_write(&mm->mmap_sem); 297 + kmem_cache_free(vm_area_cachep, vma); 298 + return -ENOMEM; 299 + } 300 + mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 301 + up_write(&mm->mmap_sem); 302 + 303 + return 0; 304 + } 305 + 306 + static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname, 307 + unsigned long *size) 308 + { 309 + Elf32_Shdr *sechdrs; 310 + unsigned int i; 311 + char *secnames; 312 + 313 + /* Grab section headers and strings so we can tell who is who */ 314 + sechdrs = (void *)ehdr + ehdr->e_shoff; 315 + secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset; 316 + 317 + /* Find the section they want */ 318 + for (i = 1; i < ehdr->e_shnum; i++) { 319 + if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) { 320 + if (size) 321 + *size = sechdrs[i].sh_size; 322 + return (void *)ehdr + sechdrs[i].sh_offset; 323 + } 324 + } 325 + *size = 0; 326 + return NULL; 327 + } 328 + 329 + static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, 330 + const char *symname) 331 + { 332 + unsigned int i; 333 + char name[MAX_SYMNAME], *c; 334 + 335 + for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) { 336 + if (lib->dynsym[i].st_name == 0) 337 + continue; 338 + strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 339 + MAX_SYMNAME); 340 + c = strchr(name, '@'); 341 + if (c) 342 + *c = 0; 343 + if (strcmp(symname, name) == 0) 344 + return &lib->dynsym[i]; 345 + } 346 + return NULL; 347 + } 348 + 349 + /* Note that we assume the section is .text and the symbol is relative to 350 + * the library base 351 + */ 
352 + static unsigned long __init find_function32(struct lib32_elfinfo *lib, 353 + const char *symname) 354 + { 355 + Elf32_Sym *sym = find_symbol32(lib, symname); 356 + 357 + if (sym == NULL) { 358 + printk(KERN_WARNING "vDSO32: function %s not found !\n", 359 + symname); 360 + return 0; 361 + } 362 + return sym->st_value - VDSO32_LBASE; 363 + } 364 + 365 + static int vdso_do_func_patch32(struct lib32_elfinfo *v32, 366 + struct lib64_elfinfo *v64, 367 + const char *orig, const char *fix) 368 + { 369 + Elf32_Sym *sym32_gen, *sym32_fix; 370 + 371 + sym32_gen = find_symbol32(v32, orig); 372 + if (sym32_gen == NULL) { 373 + printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig); 374 + return -1; 375 + } 376 + if (fix == NULL) { 377 + sym32_gen->st_name = 0; 378 + return 0; 379 + } 380 + sym32_fix = find_symbol32(v32, fix); 381 + if (sym32_fix == NULL) { 382 + printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix); 383 + return -1; 384 + } 385 + sym32_gen->st_value = sym32_fix->st_value; 386 + sym32_gen->st_size = sym32_fix->st_size; 387 + sym32_gen->st_info = sym32_fix->st_info; 388 + sym32_gen->st_other = sym32_fix->st_other; 389 + sym32_gen->st_shndx = sym32_fix->st_shndx; 390 + 391 + return 0; 392 + } 393 + 394 + 395 + #ifdef CONFIG_PPC64 396 + 397 + static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname, 398 + unsigned long *size) 399 + { 400 + Elf64_Shdr *sechdrs; 401 + unsigned int i; 402 + char *secnames; 403 + 404 + /* Grab section headers and strings so we can tell who is who */ 405 + sechdrs = (void *)ehdr + ehdr->e_shoff; 406 + secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset; 407 + 408 + /* Find the section they want */ 409 + for (i = 1; i < ehdr->e_shnum; i++) { 410 + if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) { 411 + if (size) 412 + *size = sechdrs[i].sh_size; 413 + return (void *)ehdr + sechdrs[i].sh_offset; 414 + } 415 + } 416 + if (size) 417 + *size = 0; 418 + return NULL; 419 + } 420 + 421 + static 
Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, 422 + const char *symname) 423 + { 424 + unsigned int i; 425 + char name[MAX_SYMNAME], *c; 426 + 427 + for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) { 428 + if (lib->dynsym[i].st_name == 0) 429 + continue; 430 + strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 431 + MAX_SYMNAME); 432 + c = strchr(name, '@'); 433 + if (c) 434 + *c = 0; 435 + if (strcmp(symname, name) == 0) 436 + return &lib->dynsym[i]; 437 + } 438 + return NULL; 439 + } 440 + 441 + /* Note that we assume the section is .text and the symbol is relative to 442 + * the library base 443 + */ 444 + static unsigned long __init find_function64(struct lib64_elfinfo *lib, 445 + const char *symname) 446 + { 447 + Elf64_Sym *sym = find_symbol64(lib, symname); 448 + 449 + if (sym == NULL) { 450 + printk(KERN_WARNING "vDSO64: function %s not found !\n", 451 + symname); 452 + return 0; 453 + } 454 + #ifdef VDS64_HAS_DESCRIPTORS 455 + return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - 456 + VDSO64_LBASE; 457 + #else 458 + return sym->st_value - VDSO64_LBASE; 459 + #endif 460 + } 461 + 462 + static int vdso_do_func_patch64(struct lib32_elfinfo *v32, 463 + struct lib64_elfinfo *v64, 464 + const char *orig, const char *fix) 465 + { 466 + Elf64_Sym *sym64_gen, *sym64_fix; 467 + 468 + sym64_gen = find_symbol64(v64, orig); 469 + if (sym64_gen == NULL) { 470 + printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig); 471 + return -1; 472 + } 473 + if (fix == NULL) { 474 + sym64_gen->st_name = 0; 475 + return 0; 476 + } 477 + sym64_fix = find_symbol64(v64, fix); 478 + if (sym64_fix == NULL) { 479 + printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix); 480 + return -1; 481 + } 482 + sym64_gen->st_value = sym64_fix->st_value; 483 + sym64_gen->st_size = sym64_fix->st_size; 484 + sym64_gen->st_info = sym64_fix->st_info; 485 + sym64_gen->st_other = sym64_fix->st_other; 486 + sym64_gen->st_shndx = sym64_fix->st_shndx; 487 + 488 + 
return 0; 489 + } 490 + 491 + #endif /* CONFIG_PPC64 */ 492 + 493 + 494 + static __init int vdso_do_find_sections(struct lib32_elfinfo *v32, 495 + struct lib64_elfinfo *v64) 496 + { 497 + void *sect; 498 + 499 + /* 500 + * Locate symbol tables & text section 501 + */ 502 + 503 + v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize); 504 + v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL); 505 + if (v32->dynsym == NULL || v32->dynstr == NULL) { 506 + printk(KERN_ERR "vDSO32: required symbol section not found\n"); 507 + return -1; 508 + } 509 + sect = find_section32(v32->hdr, ".text", NULL); 510 + if (sect == NULL) { 511 + printk(KERN_ERR "vDSO32: the .text section was not found\n"); 512 + return -1; 513 + } 514 + v32->text = sect - vdso32_kbase; 515 + 516 + #ifdef CONFIG_PPC64 517 + v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize); 518 + v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL); 519 + if (v64->dynsym == NULL || v64->dynstr == NULL) { 520 + printk(KERN_ERR "vDSO64: required symbol section not found\n"); 521 + return -1; 522 + } 523 + sect = find_section64(v64->hdr, ".text", NULL); 524 + if (sect == NULL) { 525 + printk(KERN_ERR "vDSO64: the .text section was not found\n"); 526 + return -1; 527 + } 528 + v64->text = sect - vdso64_kbase; 529 + #endif /* CONFIG_PPC64 */ 530 + 531 + return 0; 532 + } 533 + 534 + static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32, 535 + struct lib64_elfinfo *v64) 536 + { 537 + /* 538 + * Find signal trampolines 539 + */ 540 + 541 + #ifdef CONFIG_PPC64 542 + vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64"); 543 + #endif 544 + vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32"); 545 + vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32"); 546 + } 547 + 548 + static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32, 549 + struct lib64_elfinfo *v64) 550 + { 551 + Elf32_Sym *sym32; 552 + #ifdef CONFIG_PPC64 553 + Elf64_Sym 
*sym64; 554 + 555 + sym64 = find_symbol64(v64, "__kernel_datapage_offset"); 556 + if (sym64 == NULL) { 557 + printk(KERN_ERR "vDSO64: Can't find symbol " 558 + "__kernel_datapage_offset !\n"); 559 + return -1; 560 + } 561 + *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) = 562 + (vdso64_pages << PAGE_SHIFT) - 563 + (sym64->st_value - VDSO64_LBASE); 564 + #endif /* CONFIG_PPC64 */ 565 + 566 + sym32 = find_symbol32(v32, "__kernel_datapage_offset"); 567 + if (sym32 == NULL) { 568 + printk(KERN_ERR "vDSO32: Can't find symbol " 569 + "__kernel_datapage_offset !\n"); 570 + return -1; 571 + } 572 + *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) = 573 + (vdso32_pages << PAGE_SHIFT) - 574 + (sym32->st_value - VDSO32_LBASE); 575 + 576 + return 0; 577 + } 578 + 579 + static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32, 580 + struct lib64_elfinfo *v64) 581 + { 582 + int i; 583 + 584 + for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) { 585 + struct vdso_patch_def *patch = &vdso_patches[i]; 586 + int match = (cur_cpu_spec->cpu_features & patch->ftr_mask) 587 + == patch->ftr_value; 588 + if (!match) 589 + continue; 590 + 591 + DBG("replacing %s with %s...\n", patch->gen_name, 592 + patch->fix_name ? "NONE" : patch->fix_name); 593 + 594 + /* 595 + * Patch the 32 bits and 64 bits symbols. Note that we do not 596 + * patch the "." symbol on 64 bits. 597 + * It would be easy to do, but doesn't seem to be necessary, 598 + * patching the OPD symbol is enough. 
599 + */ 600 + vdso_do_func_patch32(v32, v64, patch->gen_name, 601 + patch->fix_name); 602 + #ifdef CONFIG_PPC64 603 + vdso_do_func_patch64(v32, v64, patch->gen_name, 604 + patch->fix_name); 605 + #endif /* CONFIG_PPC64 */ 606 + } 607 + 608 + return 0; 609 + } 610 + 611 + 612 + static __init int vdso_setup(void) 613 + { 614 + struct lib32_elfinfo v32; 615 + struct lib64_elfinfo v64; 616 + 617 + v32.hdr = vdso32_kbase; 618 + #ifdef CONFIG_PPC64 619 + v64.hdr = vdso64_kbase; 620 + #endif 621 + if (vdso_do_find_sections(&v32, &v64)) 622 + return -1; 623 + 624 + if (vdso_fixup_datapage(&v32, &v64)) 625 + return -1; 626 + 627 + if (vdso_fixup_alt_funcs(&v32, &v64)) 628 + return -1; 629 + 630 + vdso_setup_trampolines(&v32, &v64); 631 + 632 + return 0; 633 + } 634 + 635 + /* 636 + * Called from setup_arch to initialize the bitmap of available 637 + * syscalls in the systemcfg page 638 + */ 639 + static void __init vdso_setup_syscall_map(void) 640 + { 641 + unsigned int i; 642 + extern unsigned long *sys_call_table; 643 + extern unsigned long sys_ni_syscall; 644 + 645 + 646 + for (i = 0; i < __NR_syscalls; i++) { 647 + #ifdef CONFIG_PPC64 648 + if (sys_call_table[i*2] != sys_ni_syscall) 649 + vdso_data->syscall_map_64[i >> 5] |= 650 + 0x80000000UL >> (i & 0x1f); 651 + if (sys_call_table[i*2+1] != sys_ni_syscall) 652 + vdso_data->syscall_map_32[i >> 5] |= 653 + 0x80000000UL >> (i & 0x1f); 654 + #else /* CONFIG_PPC64 */ 655 + if (sys_call_table[i] != sys_ni_syscall) 656 + vdso_data->syscall_map_32[i >> 5] |= 657 + 0x80000000UL >> (i & 0x1f); 658 + #endif /* CONFIG_PPC64 */ 659 + } 660 + } 661 + 662 + 663 + void __init vdso_init(void) 664 + { 665 + int i; 666 + 667 + #ifdef CONFIG_PPC64 668 + /* 669 + * Fill up the "systemcfg" stuff for backward compatiblity 670 + */ 671 + strcpy(vdso_data->eye_catcher, "SYSTEMCFG:PPC64"); 672 + vdso_data->version.major = SYSTEMCFG_MAJOR; 673 + vdso_data->version.minor = SYSTEMCFG_MINOR; 674 + vdso_data->processor = mfspr(SPRN_PVR); 675 + 
vdso_data->platform = _machine; 676 + vdso_data->physicalMemorySize = lmb_phys_mem_size(); 677 + vdso_data->dcache_size = ppc64_caches.dsize; 678 + vdso_data->dcache_line_size = ppc64_caches.dline_size; 679 + vdso_data->icache_size = ppc64_caches.isize; 680 + vdso_data->icache_line_size = ppc64_caches.iline_size; 681 + 682 + /* 683 + * Calculate the size of the 64 bits vDSO 684 + */ 685 + vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT; 686 + DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages); 687 + #endif /* CONFIG_PPC64 */ 688 + 689 + 690 + /* 691 + * Calculate the size of the 32 bits vDSO 692 + */ 693 + vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT; 694 + DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages); 695 + 696 + 697 + /* 698 + * Setup the syscall map in the vDOS 699 + */ 700 + vdso_setup_syscall_map(); 701 + /* 702 + * Initialize the vDSO images in memory, that is do necessary 703 + * fixups of vDSO symbols, locate trampolines, etc... 
704 + */ 705 + if (vdso_setup()) { 706 + printk(KERN_ERR "vDSO setup failure, not enabled !\n"); 707 + vdso32_pages = 0; 708 + #ifdef CONFIG_PPC64 709 + vdso64_pages = 0; 710 + #endif 711 + return; 712 + } 713 + 714 + /* Make sure pages are in the correct state */ 715 + for (i = 0; i < vdso32_pages; i++) { 716 + struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); 717 + ClearPageReserved(pg); 718 + get_page(pg); 719 + 720 + } 721 + #ifdef CONFIG_PPC64 722 + for (i = 0; i < vdso64_pages; i++) { 723 + struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); 724 + ClearPageReserved(pg); 725 + get_page(pg); 726 + } 727 + #endif /* CONFIG_PPC64 */ 728 + 729 + get_page(virt_to_page(vdso_data)); 730 + } 731 + 732 + int in_gate_area_no_task(unsigned long addr) 733 + { 734 + return 0; 735 + } 736 + 737 + int in_gate_area(struct task_struct *task, unsigned long addr) 738 + { 739 + return 0; 740 + } 741 + 742 + struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 743 + { 744 + return NULL; 745 + } 746 +
+315
arch/powerpc/kernel/vdso32/gettimeofday.S
··· 1 + /* 2 + * Userland implementation of gettimeofday() for 32 bits processes in a 3 + * ppc64 kernel for use in the vDSO 4 + * 5 + * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org, 6 + * IBM Corp. 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License 10 + * as published by the Free Software Foundation; either version 11 + * 2 of the License, or (at your option) any later version. 12 + */ 13 + #include <linux/config.h> 14 + #include <asm/processor.h> 15 + #include <asm/ppc_asm.h> 16 + #include <asm/vdso.h> 17 + #include <asm/asm-offsets.h> 18 + #include <asm/unistd.h> 19 + 20 + .text 21 + /* 22 + * Exact prototype of gettimeofday 23 + * 24 + * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); 25 + * 26 + */ 27 + V_FUNCTION_BEGIN(__kernel_gettimeofday) 28 + .cfi_startproc 29 + mflr r12 30 + .cfi_register lr,r12 31 + 32 + mr r10,r3 /* r10 saves tv */ 33 + mr r11,r4 /* r11 saves tz */ 34 + bl __get_datapage@local /* get data page */ 35 + mr r9, r3 /* datapage ptr in r9 */ 36 + bl __do_get_xsec@local /* get xsec from tb & kernel */ 37 + bne- 2f /* out of line -> do syscall */ 38 + 39 + /* seconds are xsec >> 20 */ 40 + rlwinm r5,r4,12,20,31 41 + rlwimi r5,r3,12,0,19 42 + stw r5,TVAL32_TV_SEC(r10) 43 + 44 + /* get remaining xsec and convert to usec. 
we scale 45 + * up remaining xsec by 12 bits and get the top 32 bits 46 + * of the multiplication 47 + */ 48 + rlwinm r5,r4,12,0,19 49 + lis r6,1000000@h 50 + ori r6,r6,1000000@l 51 + mulhwu r5,r5,r6 52 + stw r5,TVAL32_TV_USEC(r10) 53 + 54 + cmpli cr0,r11,0 /* check if tz is NULL */ 55 + beq 1f 56 + lwz r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */ 57 + lwz r5,CFG_TZ_DSTTIME(r9) 58 + stw r4,TZONE_TZ_MINWEST(r11) 59 + stw r5,TZONE_TZ_DSTTIME(r11) 60 + 61 + 1: mtlr r12 62 + li r3,0 63 + blr 64 + 65 + 2: 66 + mtlr r12 67 + mr r3,r10 68 + mr r4,r11 69 + li r0,__NR_gettimeofday 70 + sc 71 + blr 72 + .cfi_endproc 73 + V_FUNCTION_END(__kernel_gettimeofday) 74 + 75 + /* 76 + * Exact prototype of clock_gettime() 77 + * 78 + * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); 79 + * 80 + */ 81 + V_FUNCTION_BEGIN(__kernel_clock_gettime) 82 + .cfi_startproc 83 + /* Check for supported clock IDs */ 84 + cmpli cr0,r3,CLOCK_REALTIME 85 + cmpli cr1,r3,CLOCK_MONOTONIC 86 + cror cr0,cr0,cr1 87 + bne cr0,99f 88 + 89 + mflr r12 /* r12 saves lr */ 90 + .cfi_register lr,r12 91 + mr r10,r3 /* r10 saves id */ 92 + mr r11,r4 /* r11 saves tp */ 93 + bl __get_datapage@local /* get data page */ 94 + mr r9, r3 /* datapage ptr in r9 */ 95 + beq cr1,50f /* if monotonic -> jump there */ 96 + 97 + /* 98 + * CLOCK_REALTIME 99 + */ 100 + 101 + bl __do_get_xsec@local /* get xsec from tb & kernel */ 102 + bne- 98f /* out of line -> do syscall */ 103 + 104 + /* seconds are xsec >> 20 */ 105 + rlwinm r5,r4,12,20,31 106 + rlwimi r5,r3,12,0,19 107 + stw r5,TSPC32_TV_SEC(r11) 108 + 109 + /* get remaining xsec and convert to nsec. 
we scale 110 + * up remaining xsec by 12 bits and get the top 32 bits 111 + * of the multiplication, then we multiply by 1000 112 + */ 113 + rlwinm r5,r4,12,0,19 114 + lis r6,1000000@h 115 + ori r6,r6,1000000@l 116 + mulhwu r5,r5,r6 117 + mulli r5,r5,1000 118 + stw r5,TSPC32_TV_NSEC(r11) 119 + mtlr r12 120 + li r3,0 121 + blr 122 + 123 + /* 124 + * CLOCK_MONOTONIC 125 + */ 126 + 127 + 50: bl __do_get_xsec@local /* get xsec from tb & kernel */ 128 + bne- 98f /* out of line -> do syscall */ 129 + 130 + /* seconds are xsec >> 20 */ 131 + rlwinm r6,r4,12,20,31 132 + rlwimi r6,r3,12,0,19 133 + 134 + /* get remaining xsec and convert to nsec. we scale 135 + * up remaining xsec by 12 bits and get the top 32 bits 136 + * of the multiplication, then we multiply by 1000 137 + */ 138 + rlwinm r7,r4,12,0,19 139 + lis r5,1000000@h 140 + ori r5,r5,1000000@l 141 + mulhwu r7,r7,r5 142 + mulli r7,r7,1000 143 + 144 + /* now we must fixup using wall to monotonic. We need to snapshot 145 + * that value and do the counter trick again. Fortunately, we still 146 + * have the counter value in r8 that was returned by __do_get_xsec. 147 + * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5 148 + * can be used 149 + */ 150 + 151 + lwz r3,WTOM_CLOCK_SEC(r9) 152 + lwz r4,WTOM_CLOCK_NSEC(r9) 153 + 154 + /* We now have our result in r3,r4. We create a fake dependency 155 + * on that result and re-check the counter 156 + */ 157 + or r5,r4,r3 158 + xor r0,r5,r5 159 + add r9,r9,r0 160 + #ifdef CONFIG_PPC64 161 + lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) 162 + #else 163 + lwz r0,(CFG_TB_UPDATE_COUNT)(r9) 164 + #endif 165 + cmpl cr0,r8,r0 /* check if updated */ 166 + bne- 50b 167 + 168 + /* Calculate and store result. Note that this mimmics the C code, 169 + * which may cause funny results if nsec goes negative... is that 170 + * possible at all ? 
171 + */ 172 + add r3,r3,r6 173 + add r4,r4,r7 174 + lis r5,NSEC_PER_SEC@h 175 + ori r5,r5,NSEC_PER_SEC@l 176 + cmpli cr0,r4,r5 177 + blt 1f 178 + subf r4,r5,r4 179 + addi r3,r3,1 180 + 1: stw r3,TSPC32_TV_SEC(r11) 181 + stw r4,TSPC32_TV_NSEC(r11) 182 + 183 + mtlr r12 184 + li r3,0 185 + blr 186 + 187 + /* 188 + * syscall fallback 189 + */ 190 + 98: 191 + mtlr r12 192 + mr r3,r10 193 + mr r4,r11 194 + 99: 195 + li r0,__NR_clock_gettime 196 + sc 197 + blr 198 + .cfi_endproc 199 + V_FUNCTION_END(__kernel_clock_gettime) 200 + 201 + 202 + /* 203 + * Exact prototype of clock_getres() 204 + * 205 + * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); 206 + * 207 + */ 208 + V_FUNCTION_BEGIN(__kernel_clock_getres) 209 + .cfi_startproc 210 + /* Check for supported clock IDs */ 211 + cmpwi cr0,r3,CLOCK_REALTIME 212 + cmpwi cr1,r3,CLOCK_MONOTONIC 213 + cror cr0,cr0,cr1 214 + bne cr0,99f 215 + 216 + li r3,0 217 + cmpli cr0,r4,0 218 + beqlr 219 + lis r5,CLOCK_REALTIME_RES@h 220 + ori r5,r5,CLOCK_REALTIME_RES@l 221 + stw r3,TSPC32_TV_SEC(r4) 222 + stw r5,TSPC32_TV_NSEC(r4) 223 + blr 224 + 225 + /* 226 + * syscall fallback 227 + */ 228 + 99: 229 + li r0,__NR_clock_getres 230 + sc 231 + blr 232 + .cfi_endproc 233 + V_FUNCTION_END(__kernel_clock_getres) 234 + 235 + 236 + /* 237 + * This is the core of gettimeofday() & friends, it returns the xsec 238 + * value in r3 & r4 and expects the datapage ptr (non clobbered) 239 + * in r9. clobbers r0,r4,r5,r6,r7,r8. 240 + * When returning, r8 contains the counter value that can be reused 241 + * by the monotonic clock implementation 242 + */ 243 + __do_get_xsec: 244 + .cfi_startproc 245 + /* Check for update count & load values. We use the low 246 + * order 32 bits of the update count 247 + */ 248 + #ifdef CONFIG_PPC64 249 + 1: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9) 250 + #else 251 + 1: lwz r8,(CFG_TB_UPDATE_COUNT)(r9) 252 + #endif 253 + andi. r0,r8,1 /* pending update ? 
loop */ 254 + bne- 1b 255 + xor r0,r8,r8 /* create dependency */ 256 + add r9,r9,r0 257 + 258 + /* Load orig stamp (offset to TB) */ 259 + lwz r5,CFG_TB_ORIG_STAMP(r9) 260 + lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) 261 + 262 + /* Get a stable TB value */ 263 + 2: mftbu r3 264 + mftbl r4 265 + mftbu r0 266 + cmpl cr0,r3,r0 267 + bne- 2b 268 + 269 + /* Substract tb orig stamp. If the high part is non-zero, we jump to 270 + * the slow path which call the syscall. 271 + * If it's ok, then we have our 32 bits tb_ticks value in r7 272 + */ 273 + subfc r7,r6,r4 274 + subfe. r0,r5,r3 275 + bne- 3f 276 + 277 + /* Load scale factor & do multiplication */ 278 + lwz r5,CFG_TB_TO_XS(r9) /* load values */ 279 + lwz r6,(CFG_TB_TO_XS+4)(r9) 280 + mulhwu r4,r7,r5 281 + mulhwu r6,r7,r6 282 + mullw r0,r7,r5 283 + addc r6,r6,r0 284 + 285 + /* At this point, we have the scaled xsec value in r4 + XER:CA 286 + * we load & add the stamp since epoch 287 + */ 288 + lwz r5,CFG_STAMP_XSEC(r9) 289 + lwz r6,(CFG_STAMP_XSEC+4)(r9) 290 + adde r4,r4,r6 291 + addze r3,r5 292 + 293 + /* We now have our result in r3,r4. We create a fake dependency 294 + * on that result and re-check the counter 295 + */ 296 + or r6,r4,r3 297 + xor r0,r6,r6 298 + add r9,r9,r0 299 + #ifdef CONFIG_PPC64 300 + lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) 301 + #else 302 + lwz r0,(CFG_TB_UPDATE_COUNT)(r9) 303 + #endif 304 + cmpl cr0,r8,r0 /* check if updated */ 305 + bne- 1b 306 + 307 + /* Warning ! The caller expects CR:EQ to be set to indicate a 308 + * successful calculation (so it won't fallback to the syscall 309 + * method). We have overriden that CR bit in the counter check, 310 + * but fortunately, the loop exit condition _is_ CR:EQ set, so 311 + * we can exit safely here. If you change this code, be careful 312 + * of that side effect. 313 + */ 314 + 3: blr 315 + .cfi_endproc
+242
arch/powerpc/kernel/vdso64/gettimeofday.S
··· 1 + /* 2 + * Userland implementation of gettimeofday() for 64 bits processes in a 3 + * ppc64 kernel for use in the vDSO 4 + * 5 + * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), 6 + * IBM Corp. 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License 10 + * as published by the Free Software Foundation; either version 11 + * 2 of the License, or (at your option) any later version. 12 + */ 13 + #include <linux/config.h> 14 + #include <asm/processor.h> 15 + #include <asm/ppc_asm.h> 16 + #include <asm/vdso.h> 17 + #include <asm/asm-offsets.h> 18 + #include <asm/unistd.h> 19 + 20 + .text 21 + /* 22 + * Exact prototype of gettimeofday 23 + * 24 + * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); 25 + * 26 + */ 27 + V_FUNCTION_BEGIN(__kernel_gettimeofday) 28 + .cfi_startproc 29 + mflr r12 30 + .cfi_register lr,r12 31 + 32 + mr r11,r3 /* r11 holds tv */ 33 + mr r10,r4 /* r10 holds tz */ 34 + bl V_LOCAL_FUNC(__get_datapage) /* get data page */ 35 + bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ 36 + lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ 37 + ori r7,r7,16960 38 + rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ 39 + rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ 40 + std r5,TVAL64_TV_SEC(r11) /* store sec in tv */ 41 + subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ 42 + mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / 43 + * XSEC_PER_SEC 44 + */ 45 + rldicl r0,r0,44,20 46 + cmpldi cr0,r10,0 /* check if tz is NULL */ 47 + std r0,TVAL64_TV_USEC(r11) /* store usec in tv */ 48 + beq 1f 49 + lwz r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */ 50 + lwz r5,CFG_TZ_DSTTIME(r3) 51 + stw r4,TZONE_TZ_MINWEST(r10) 52 + stw r5,TZONE_TZ_DSTTIME(r10) 53 + 1: mtlr r12 54 + li r3,0 /* always success */ 55 + blr 56 + .cfi_endproc 57 + V_FUNCTION_END(__kernel_gettimeofday) 58 + 59 + 60 + /* 61 + * Exact prototype of clock_gettime() 62 + * 63 + * 
int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); 64 + * 65 + */ 66 + V_FUNCTION_BEGIN(__kernel_clock_gettime) 67 + .cfi_startproc 68 + /* Check for supported clock IDs */ 69 + cmpwi cr0,r3,CLOCK_REALTIME 70 + cmpwi cr1,r3,CLOCK_MONOTONIC 71 + cror cr0,cr0,cr1 72 + bne cr0,99f 73 + 74 + mflr r12 /* r12 saves lr */ 75 + .cfi_register lr,r12 76 + mr r10,r3 /* r10 saves id */ 77 + mr r11,r4 /* r11 saves tp */ 78 + bl V_LOCAL_FUNC(__get_datapage) /* get data page */ 79 + beq cr1,50f /* if monotonic -> jump there */ 80 + 81 + /* 82 + * CLOCK_REALTIME 83 + */ 84 + 85 + bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ 86 + 87 + lis r7,0x3b9a /* r7 = 1000000000 = NSEC_PER_SEC */ 88 + ori r7,r7,0xca00 89 + rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ 90 + rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ 91 + std r5,TSPC64_TV_SEC(r11) /* store sec in tv */ 92 + subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ 93 + mulld r0,r0,r7 /* nsec = (xsec * NSEC_PER_SEC) / 94 + * XSEC_PER_SEC 95 + */ 96 + rldicl r0,r0,44,20 97 + std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */ 98 + 99 + mtlr r12 100 + li r3,0 101 + blr 102 + 103 + /* 104 + * CLOCK_MONOTONIC 105 + */ 106 + 107 + 50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ 108 + 109 + lis r7,0x3b9a /* r7 = 1000000000 = NSEC_PER_SEC */ 110 + ori r7,r7,0xca00 111 + rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ 112 + rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ 113 + subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ 114 + mulld r0,r0,r7 /* nsec = (xsec * NSEC_PER_SEC) / 115 + * XSEC_PER_SEC 116 + */ 117 + rldicl r6,r0,44,20 118 + 119 + /* now we must fixup using wall to monotonic. We need to snapshot 120 + * that value and do the counter trick again. Fortunately, we still 121 + * have the counter value in r8 that was returned by __do_get_xsec. 122 + * At this point, r5,r6 contain our sec/nsec values. 
123 + * can be used 124 + */ 125 + 126 + lwz r4,WTOM_CLOCK_SEC(r9) 127 + lwz r7,WTOM_CLOCK_NSEC(r9) 128 + 129 + /* We now have our result in r4,r7. We create a fake dependency 130 + * on that result and re-check the counter 131 + */ 132 + or r9,r4,r7 133 + xor r0,r9,r9 134 + add r3,r3,r0 135 + ld r0,CFG_TB_UPDATE_COUNT(r3) 136 + cmpld cr0,r0,r8 /* check if updated */ 137 + bne- 50b 138 + 139 + /* Calculate and store result. Note that this mimmics the C code, 140 + * which may cause funny results if nsec goes negative... is that 141 + * possible at all ? 142 + */ 143 + add r4,r4,r5 144 + add r7,r7,r6 145 + lis r9,NSEC_PER_SEC@h 146 + ori r9,r9,NSEC_PER_SEC@l 147 + cmpli cr0,r7,r9 148 + blt 1f 149 + subf r7,r9,r7 150 + addi r4,r4,1 151 + 1: std r4,TSPC64_TV_SEC(r11) 152 + std r7,TSPC64_TV_NSEC(r11) 153 + 154 + mtlr r12 155 + li r3,0 156 + blr 157 + 158 + /* 159 + * syscall fallback 160 + */ 161 + 98: 162 + mtlr r12 163 + mr r3,r10 164 + mr r4,r11 165 + 99: 166 + li r0,__NR_clock_gettime 167 + sc 168 + blr 169 + .cfi_endproc 170 + V_FUNCTION_END(__kernel_clock_gettime) 171 + 172 + 173 + /* 174 + * Exact prototype of clock_getres() 175 + * 176 + * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); 177 + * 178 + */ 179 + V_FUNCTION_BEGIN(__kernel_clock_getres) 180 + .cfi_startproc 181 + /* Check for supported clock IDs */ 182 + cmpwi cr0,r3,CLOCK_REALTIME 183 + cmpwi cr1,r3,CLOCK_MONOTONIC 184 + cror cr0,cr0,cr1 185 + bne cr0,99f 186 + 187 + li r3,0 188 + cmpli cr0,r4,0 189 + beqlr 190 + lis r5,CLOCK_REALTIME_RES@h 191 + ori r5,r5,CLOCK_REALTIME_RES@l 192 + std r3,TSPC64_TV_SEC(r4) 193 + std r5,TSPC64_TV_NSEC(r4) 194 + blr 195 + 196 + /* 197 + * syscall fallback 198 + */ 199 + 99: 200 + li r0,__NR_clock_getres 201 + sc 202 + blr 203 + .cfi_endproc 204 + V_FUNCTION_END(__kernel_clock_getres) 205 + 206 + 207 + /* 208 + * This is the core of gettimeofday(), it returns the xsec 209 + * value in r4 and expects the datapage ptr (non clobbered) 210 + * in r3. 
clobbers r0,r4,r5,r6,r7,r8 211 + * When returning, r8 contains the counter value that can be reused 212 + */ 213 + V_FUNCTION_BEGIN(__do_get_xsec) 214 + .cfi_startproc 215 + /* check for update count & load values */ 216 + 1: ld r8,CFG_TB_UPDATE_COUNT(r3) 217 + andi. r0,r4,1 /* pending update ? loop */ 218 + bne- 1b 219 + xor r0,r4,r4 /* create dependency */ 220 + add r3,r3,r0 221 + 222 + /* Get TB & offset it */ 223 + mftb r7 224 + ld r9,CFG_TB_ORIG_STAMP(r3) 225 + subf r7,r9,r7 226 + 227 + /* Scale result */ 228 + ld r5,CFG_TB_TO_XS(r3) 229 + mulhdu r7,r7,r5 230 + 231 + /* Add stamp since epoch */ 232 + ld r6,CFG_STAMP_XSEC(r3) 233 + add r4,r6,r7 234 + 235 + xor r0,r4,r4 236 + add r3,r3,r0 237 + ld r0,CFG_TB_UPDATE_COUNT(r3) 238 + cmpld cr0,r0,r8 /* check if updated */ 239 + bne- 1b 240 + blr 241 + .cfi_endproc 242 + V_FUNCTION_END(__do_get_xsec)
-4
arch/powerpc/mm/mem.c
··· 46 46 #include <asm/prom.h> 47 47 #include <asm/lmb.h> 48 48 #include <asm/sections.h> 49 - #ifdef CONFIG_PPC64 50 49 #include <asm/vdso.h> 51 - #endif 52 50 53 51 #include "mmu_decl.h" 54 52 ··· 395 397 396 398 mem_init_done = 1; 397 399 398 - #ifdef CONFIG_PPC64 399 400 /* Initialize the vDSO */ 400 401 vdso_init(); 401 - #endif 402 402 } 403 403 404 404 /*
-1
arch/powerpc/oprofile/op_model_power4.c
··· 14 14 #include <asm/system.h> 15 15 #include <asm/processor.h> 16 16 #include <asm/cputable.h> 17 - #include <asm/systemcfg.h> 18 17 #include <asm/rtas.h> 19 18 #include <asm/oprofile_impl.h> 20 19 #include <asm/reg.h>
+1 -2
arch/ppc64/kernel/Makefile
··· 14 14 obj-y += idle.o dma.o \ 15 15 align.o \ 16 16 rtc.o \ 17 - iommu.o vdso.o 18 - obj-y += vdso32/ vdso64/ 17 + iommu.o 19 18 20 19 pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o 21 20
+4
arch/ppc64/kernel/vdso32/Makefile arch/powerpc/kernel/vdso32/Makefile
··· 5 5 6 6 # Build rules 7 7 8 + ifeq ($(CONFIG_PPC32),y) 9 + CROSS32CC := $(CC) 10 + endif 11 + 8 12 targets := $(obj-vdso32) vdso32.so 9 13 obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) 10 14
arch/ppc64/kernel/vdso32/cacheflush.S arch/powerpc/kernel/vdso32/cacheflush.S
+16
arch/ppc64/kernel/vdso32/datapage.S arch/powerpc/kernel/vdso32/datapage.S
··· 66 66 blr 67 67 .cfi_endproc 68 68 V_FUNCTION_END(__kernel_get_syscall_map) 69 + 70 + /* 71 + * void unsigned long long __kernel_get_tbfreq(void); 72 + * 73 + * returns the timebase frequency in HZ 74 + */ 75 + V_FUNCTION_BEGIN(__kernel_get_tbfreq) 76 + .cfi_startproc 77 + mflr r12 78 + .cfi_register lr,r12 79 + bl __get_datapage@local 80 + lwz r3,CFG_TB_TICKS_PER_SEC(r3) 81 + lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3) 82 + mtlr r12 83 + .cfi_endproc 84 + V_FUNCTION_END(__kernel_get_tbfreq)
-140
arch/ppc64/kernel/vdso32/gettimeofday.S
··· 1 - /* 2 - * Userland implementation of gettimeofday() for 32 bits processes in a 3 - * ppc64 kernel for use in the vDSO 4 - * 5 - * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp. 6 - * 7 - * This program is free software; you can redistribute it and/or 8 - * modify it under the terms of the GNU General Public License 9 - * as published by the Free Software Foundation; either version 10 - * 2 of the License, or (at your option) any later version. 11 - */ 12 - #include <linux/config.h> 13 - #include <asm/processor.h> 14 - #include <asm/ppc_asm.h> 15 - #include <asm/vdso.h> 16 - #include <asm/asm-offsets.h> 17 - #include <asm/unistd.h> 18 - 19 - .text 20 - /* 21 - * Exact prototype of gettimeofday 22 - * 23 - * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); 24 - * 25 - */ 26 - V_FUNCTION_BEGIN(__kernel_gettimeofday) 27 - .cfi_startproc 28 - mflr r12 29 - .cfi_register lr,r12 30 - 31 - mr r10,r3 /* r10 saves tv */ 32 - mr r11,r4 /* r11 saves tz */ 33 - bl __get_datapage@local /* get data page */ 34 - mr r9, r3 /* datapage ptr in r9 */ 35 - bl __do_get_xsec@local /* get xsec from tb & kernel */ 36 - bne- 2f /* out of line -> do syscall */ 37 - 38 - /* seconds are xsec >> 20 */ 39 - rlwinm r5,r4,12,20,31 40 - rlwimi r5,r3,12,0,19 41 - stw r5,TVAL32_TV_SEC(r10) 42 - 43 - /* get remaining xsec and convert to usec. 
we scale 44 - * up remaining xsec by 12 bits and get the top 32 bits 45 - * of the multiplication 46 - */ 47 - rlwinm r5,r4,12,0,19 48 - lis r6,1000000@h 49 - ori r6,r6,1000000@l 50 - mulhwu r5,r5,r6 51 - stw r5,TVAL32_TV_USEC(r10) 52 - 53 - cmpli cr0,r11,0 /* check if tz is NULL */ 54 - beq 1f 55 - lwz r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */ 56 - lwz r5,CFG_TZ_DSTTIME(r9) 57 - stw r4,TZONE_TZ_MINWEST(r11) 58 - stw r5,TZONE_TZ_DSTTIME(r11) 59 - 60 - 1: mtlr r12 61 - li r3,0 62 - blr 63 - 64 - 2: mr r3,r10 65 - mr r4,r11 66 - li r0,__NR_gettimeofday 67 - sc 68 - b 1b 69 - .cfi_endproc 70 - V_FUNCTION_END(__kernel_gettimeofday) 71 - 72 - /* 73 - * This is the core of gettimeofday(), it returns the xsec 74 - * value in r3 & r4 and expects the datapage ptr (non clobbered) 75 - * in r9. clobbers r0,r4,r5,r6,r7,r8 76 - */ 77 - __do_get_xsec: 78 - .cfi_startproc 79 - /* Check for update count & load values. We use the low 80 - * order 32 bits of the update count 81 - */ 82 - 1: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9) 83 - andi. r0,r8,1 /* pending update ? loop */ 84 - bne- 1b 85 - xor r0,r8,r8 /* create dependency */ 86 - add r9,r9,r0 87 - 88 - /* Load orig stamp (offset to TB) */ 89 - lwz r5,CFG_TB_ORIG_STAMP(r9) 90 - lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) 91 - 92 - /* Get a stable TB value */ 93 - 2: mftbu r3 94 - mftbl r4 95 - mftbu r0 96 - cmpl cr0,r3,r0 97 - bne- 2b 98 - 99 - /* Substract tb orig stamp. If the high part is non-zero, we jump to the 100 - * slow path which call the syscall. If it's ok, then we have our 32 bits 101 - * tb_ticks value in r7 102 - */ 103 - subfc r7,r6,r4 104 - subfe. 
r0,r5,r3 105 - bne- 3f 106 - 107 - /* Load scale factor & do multiplication */ 108 - lwz r5,CFG_TB_TO_XS(r9) /* load values */ 109 - lwz r6,(CFG_TB_TO_XS+4)(r9) 110 - mulhwu r4,r7,r5 111 - mulhwu r6,r7,r6 112 - mullw r0,r7,r5 113 - addc r6,r6,r0 114 - 115 - /* At this point, we have the scaled xsec value in r4 + XER:CA 116 - * we load & add the stamp since epoch 117 - */ 118 - lwz r5,CFG_STAMP_XSEC(r9) 119 - lwz r6,(CFG_STAMP_XSEC+4)(r9) 120 - adde r4,r4,r6 121 - addze r3,r5 122 - 123 - /* We now have our result in r3,r4. We create a fake dependency 124 - * on that result and re-check the counter 125 - */ 126 - xor r0,r4,r4 127 - add r9,r9,r0 128 - lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9) 129 - cmpl cr0,r8,r0 /* check if updated */ 130 - bne- 1b 131 - 132 - /* Warning ! The caller expects CR:EQ to be set to indicate a 133 - * successful calculation (so it won't fallback to the syscall 134 - * method). We have overriden that CR bit in the counter check, 135 - * but fortunately, the loop exit condition _is_ CR:EQ set, so 136 - * we can exit safely here. If you change this code, be careful 137 - * of that side effect. 138 - */ 139 - 3: blr 140 - .cfi_endproc
arch/ppc64/kernel/vdso32/note.S arch/powerpc/kernel/vdso32/note.S
arch/ppc64/kernel/vdso32/sigtramp.S arch/powerpc/kernel/vdso32/sigtramp.S
+4 -1
arch/ppc64/kernel/vdso32/vdso32.lds.S arch/powerpc/kernel/vdso32/vdso32.lds.S
··· 102 102 { 103 103 VDSO_VERSION_STRING { 104 104 global: 105 - __kernel_datapage_offset; /* Has to be there for the kernel to find it */ 105 + __kernel_datapage_offset; /* Has to be there for the kernel to find */ 106 106 __kernel_get_syscall_map; 107 107 __kernel_gettimeofday; 108 + __kernel_clock_gettime; 109 + __kernel_clock_getres; 110 + __kernel_get_tbfreq; 108 111 __kernel_sync_dicache; 109 112 __kernel_sync_dicache_p5; 110 113 __kernel_sigtramp32;
+4 -4
arch/ppc64/kernel/vdso32/vdso32_wrapper.S arch/powerpc/kernel/vdso64/vdso64_wrapper.S
··· 3 3 4 4 .section ".data.page_aligned" 5 5 6 - .globl vdso32_start, vdso32_end 6 + .globl vdso64_start, vdso64_end 7 7 .balign PAGE_SIZE 8 - vdso32_start: 9 - .incbin "arch/ppc64/kernel/vdso32/vdso32.so" 8 + vdso64_start: 9 + .incbin "arch/powerpc/kernel/vdso64/vdso64.so" 10 10 .balign PAGE_SIZE 11 - vdso32_end: 11 + vdso64_end: 12 12 13 13 .previous
arch/ppc64/kernel/vdso64/Makefile arch/powerpc/kernel/vdso64/Makefile
arch/ppc64/kernel/vdso64/cacheflush.S arch/powerpc/kernel/vdso64/cacheflush.S
+16
arch/ppc64/kernel/vdso64/datapage.S arch/powerpc/kernel/vdso64/datapage.S
··· 66 66 blr 67 67 .cfi_endproc 68 68 V_FUNCTION_END(__kernel_get_syscall_map) 69 + 70 + 71 + /* 72 + * void unsigned long __kernel_get_tbfreq(void); 73 + * 74 + * returns the timebase frequency in HZ 75 + */ 76 + V_FUNCTION_BEGIN(__kernel_get_tbfreq) 77 + .cfi_startproc 78 + mflr r12 79 + .cfi_register lr,r12 80 + bl V_LOCAL_FUNC(__get_datapage) 81 + ld r3,CFG_TB_TICKS_PER_SEC(r3) 82 + mtlr r12 83 + .cfi_endproc 84 + V_FUNCTION_END(__kernel_get_tbfreq)
-91
arch/ppc64/kernel/vdso64/gettimeofday.S
··· 1 - /* 2 - * Userland implementation of gettimeofday() for 64 bits processes in a 3 - * ppc64 kernel for use in the vDSO 4 - * 5 - * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), 6 - * IBM Corp. 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License 10 - * as published by the Free Software Foundation; either version 11 - * 2 of the License, or (at your option) any later version. 12 - */ 13 - #include <linux/config.h> 14 - #include <asm/processor.h> 15 - #include <asm/ppc_asm.h> 16 - #include <asm/vdso.h> 17 - #include <asm/asm-offsets.h> 18 - 19 - .text 20 - /* 21 - * Exact prototype of gettimeofday 22 - * 23 - * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); 24 - * 25 - */ 26 - V_FUNCTION_BEGIN(__kernel_gettimeofday) 27 - .cfi_startproc 28 - mflr r12 29 - .cfi_register lr,r12 30 - 31 - mr r11,r3 /* r11 holds tv */ 32 - mr r10,r4 /* r10 holds tz */ 33 - bl V_LOCAL_FUNC(__get_datapage) /* get data page */ 34 - bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */ 35 - lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */ 36 - ori r7,r7,16960 37 - rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */ 38 - rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */ 39 - std r5,TVAL64_TV_SEC(r11) /* store sec in tv */ 40 - subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */ 41 - mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) / XSEC_PER_SEC */ 42 - rldicl r0,r0,44,20 43 - cmpldi cr0,r10,0 /* check if tz is NULL */ 44 - std r0,TVAL64_TV_USEC(r11) /* store usec in tv */ 45 - beq 1f 46 - lwz r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */ 47 - lwz r5,CFG_TZ_DSTTIME(r3) 48 - stw r4,TZONE_TZ_MINWEST(r10) 49 - stw r5,TZONE_TZ_DSTTIME(r10) 50 - 1: mtlr r12 51 - li r3,0 /* always success */ 52 - blr 53 - .cfi_endproc 54 - V_FUNCTION_END(__kernel_gettimeofday) 55 - 56 - 57 - /* 58 - * This is the core of gettimeofday(), it returns the xsec 59 - * value in r4 and expects the 
datapage ptr (non clobbered) 60 - * in r3. clobbers r0,r4,r5,r6,r7,r8 61 - */ 62 - V_FUNCTION_BEGIN(__do_get_xsec) 63 - .cfi_startproc 64 - /* check for update count & load values */ 65 - 1: ld r7,CFG_TB_UPDATE_COUNT(r3) 66 - andi. r0,r4,1 /* pending update ? loop */ 67 - bne- 1b 68 - xor r0,r4,r4 /* create dependency */ 69 - add r3,r3,r0 70 - 71 - /* Get TB & offset it */ 72 - mftb r8 73 - ld r9,CFG_TB_ORIG_STAMP(r3) 74 - subf r8,r9,r8 75 - 76 - /* Scale result */ 77 - ld r5,CFG_TB_TO_XS(r3) 78 - mulhdu r8,r8,r5 79 - 80 - /* Add stamp since epoch */ 81 - ld r6,CFG_STAMP_XSEC(r3) 82 - add r4,r6,r8 83 - 84 - xor r0,r4,r4 85 - add r3,r3,r0 86 - ld r0,CFG_TB_UPDATE_COUNT(r3) 87 - cmpld cr0,r0,r7 /* check if updated */ 88 - bne- 1b 89 - blr 90 - .cfi_endproc 91 - V_FUNCTION_END(__do_get_xsec)
arch/ppc64/kernel/vdso64/note.S arch/powerpc/kernel/vdso64/note.S
arch/ppc64/kernel/vdso64/sigtramp.S arch/powerpc/kernel/vdso64/sigtramp.S
+4 -1
arch/ppc64/kernel/vdso64/vdso64.lds.S arch/powerpc/kernel/vdso64/vdso64.lds.S
··· 102 102 { 103 103 VDSO_VERSION_STRING { 104 104 global: 105 - __kernel_datapage_offset; /* Has to be there for the kernel to find it */ 105 + __kernel_datapage_offset; /* Has to be there for the kernel to find */ 106 106 __kernel_get_syscall_map; 107 107 __kernel_gettimeofday; 108 + __kernel_clock_gettime; 109 + __kernel_clock_getres; 110 + __kernel_get_tbfreq; 108 111 __kernel_sync_dicache; 109 112 __kernel_sync_dicache_p5; 110 113 __kernel_sigtramp_rt64;
+4 -4
arch/ppc64/kernel/vdso64/vdso64_wrapper.S arch/powerpc/kernel/vdso32/vdso32_wrapper.S
··· 3 3 4 4 .section ".data.page_aligned" 5 5 6 - .globl vdso64_start, vdso64_end 6 + .globl vdso32_start, vdso32_end 7 7 .balign PAGE_SIZE 8 - vdso64_start: 9 - .incbin "arch/ppc64/kernel/vdso64/vdso64.so" 8 + vdso32_start: 9 + .incbin "arch/powerpc/kernel/vdso32/vdso32.so" 10 10 .balign PAGE_SIZE 11 - vdso64_end: 11 + vdso32_end: 12 12 13 13 .previous
-2
include/asm-powerpc/auxvec.h
··· 14 14 /* The vDSO location. We have to use the same value as x86 for glibc's 15 15 * sake :-) 16 16 */ 17 - #ifdef __powerpc64__ 18 17 #define AT_SYSINFO_EHDR 33 19 - #endif 20 18 21 19 #endif
+4 -6
include/asm-powerpc/elf.h
··· 269 269 extern int icache_bsize; 270 270 extern int ucache_bsize; 271 271 272 - #ifdef __powerpc64__ 272 + /* vDSO has arch_setup_additional_pages */ 273 + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 273 274 struct linux_binprm; 274 - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES /* vDSO has arch_setup_additional_pages */ 275 - extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); 275 + extern int arch_setup_additional_pages(struct linux_binprm *bprm, 276 + int executable_stack); 276 277 #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); 277 - #else 278 - #define VDSO_AUX_ENT(a,b) 279 - #endif /* __powerpc64__ */ 280 278 281 279 /* 282 280 * The requirements here are:
+1 -1
include/asm-powerpc/processor.h
··· 177 177 #ifdef CONFIG_PPC64 178 178 unsigned long start_tb; /* Start purr when proc switched in */ 179 179 unsigned long accum_tb; /* Total accumilated purr for process */ 180 - unsigned long vdso_base; /* base of the vDSO library */ 181 180 #endif 181 + unsigned long vdso_base; /* base of the vDSO library */ 182 182 unsigned long dabr; /* Data address breakpoint register */ 183 183 #ifdef CONFIG_ALTIVEC 184 184 /* Complete AltiVec register set */
-64
include/asm-powerpc/systemcfg.h
··· 1 - #ifndef _SYSTEMCFG_H 2 - #define _SYSTEMCFG_H 3 - 4 - /* 5 - * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM 6 - * 7 - * This program is free software; you can redistribute it and/or 8 - * modify it under the terms of the GNU General Public License 9 - * as published by the Free Software Foundation; either version 10 - * 2 of the License, or (at your option) any later version. 11 - */ 12 - 13 - /* Change Activity: 14 - * 2002/09/30 : bergner : Created 15 - * End Change Activity 16 - */ 17 - 18 - /* 19 - * If the major version changes we are incompatible. 20 - * Minor version changes are a hint. 21 - */ 22 - #define SYSTEMCFG_MAJOR 1 23 - #define SYSTEMCFG_MINOR 1 24 - 25 - #ifndef __ASSEMBLY__ 26 - 27 - #include <linux/unistd.h> 28 - 29 - #define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32) 30 - 31 - struct systemcfg { 32 - __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */ 33 - struct { /* Systemcfg version numbers */ 34 - __u32 major; /* Major number 0x10 */ 35 - __u32 minor; /* Minor number 0x14 */ 36 - } version; 37 - 38 - __u32 platform; /* Platform flags 0x18 */ 39 - __u32 processor; /* Processor type 0x1C */ 40 - __u64 processorCount; /* # of physical processors 0x20 */ 41 - __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */ 42 - __u64 tb_orig_stamp; /* Timebase at boot 0x30 */ 43 - __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ 44 - __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */ 45 - __u64 stamp_xsec; /* 0x48 */ 46 - __u64 tb_update_count; /* Timebase atomicity ctr 0x50 */ 47 - __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */ 48 - __u32 tz_dsttime; /* Type of dst correction 0x5C */ 49 - /* next four are no longer used except to be exported to /proc */ 50 - __u32 dcache_size; /* L1 d-cache size 0x60 */ 51 - __u32 dcache_line_size; /* L1 d-cache line size 0x64 */ 52 - __u32 icache_size; /* L1 i-cache size 0x68 */ 53 - __u32 icache_line_size; /* L1 i-cache line size 0x6C */ 54 - __u32 
syscall_map_64[SYSCALL_MAP_SIZE]; /* map of available syscalls 0x70 */ 55 - __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of available syscalls */ 56 - }; 57 - 58 - #ifdef __KERNEL__ 59 - extern struct systemcfg *_systemcfg; /* to be renamed */ 60 - #endif 61 - 62 - #endif /* __ASSEMBLY__ */ 63 - 64 - #endif /* _SYSTEMCFG_H */
+108
include/asm-powerpc/vdso_datapage.h
··· 1 + #ifndef _VDSO_DATAPAGE_H 2 + #define _VDSO_DATAPAGE_H 3 + 4 + /* 5 + * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM 6 + * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>, 7 + * IBM Corp. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License 11 + * as published by the Free Software Foundation; either version 12 + * 2 of the License, or (at your option) any later version. 13 + */ 14 + 15 + 16 + /* 17 + * Note about this structure: 18 + * 19 + * This structure was historically called systemcfg and exposed to 20 + * userland via /proc/ppc64/systemcfg. Unfortunately, this became an 21 + * ABI issue as some proprietary software started relying on being able 22 + * to mmap() it, thus we have to keep the base layout at least for a 23 + * few kernel versions. 24 + * 25 + * However, since ppc32 doesn't suffer from this backward handicap, 26 + * a simpler version of the data structure is used there with only the 27 + * fields actually used by the vDSO. 28 + * 29 + */ 30 + 31 + /* 32 + * If the major version changes we are incompatible. 33 + * Minor version changes are a hint. 
34 + */ 35 + #define SYSTEMCFG_MAJOR 1 36 + #define SYSTEMCFG_MINOR 1 37 + 38 + #ifndef __ASSEMBLY__ 39 + 40 + #include <linux/unistd.h> 41 + 42 + #define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32) 43 + 44 + /* 45 + * So here is the ppc64 backward compatible version 46 + */ 47 + 48 + #ifdef CONFIG_PPC64 49 + 50 + struct vdso_data { 51 + __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */ 52 + struct { /* Systemcfg version numbers */ 53 + __u32 major; /* Major number 0x10 */ 54 + __u32 minor; /* Minor number 0x14 */ 55 + } version; 56 + 57 + __u32 platform; /* Platform flags 0x18 */ 58 + __u32 processor; /* Processor type 0x1C */ 59 + __u64 processorCount; /* # of physical processors 0x20 */ 60 + __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */ 61 + __u64 tb_orig_stamp; /* Timebase at boot 0x30 */ 62 + __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ 63 + __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */ 64 + __u64 stamp_xsec; /* 0x48 */ 65 + __u64 tb_update_count; /* Timebase atomicity ctr 0x50 */ 66 + __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */ 67 + __u32 tz_dsttime; /* Type of dst correction 0x5C */ 68 + __u32 dcache_size; /* L1 d-cache size 0x60 */ 69 + __u32 dcache_line_size; /* L1 d-cache line size 0x64 */ 70 + __u32 icache_size; /* L1 i-cache size 0x68 */ 71 + __u32 icache_line_size; /* L1 i-cache line size 0x6C */ 72 + 73 + /* those additional ones don't have to be located anywhere 74 + * special as they were not part of the original systemcfg 75 + */ 76 + __s64 wtom_clock_sec; /* Wall to monotonic clock */ 77 + __s32 wtom_clock_nsec; 78 + __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ 79 + __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ 80 + }; 81 + 82 + #else /* CONFIG_PPC64 */ 83 + 84 + /* 85 + * And here is the simpler 32 bits version 86 + */ 87 + struct vdso_data { 88 + __u64 tb_orig_stamp; /* Timebase at boot 0x30 */ 89 + __u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */ 90 + 
__u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */ 91 + __u64 stamp_xsec; /* 0x48 */ 92 + __u32 tb_update_count; /* Timebase atomicity ctr 0x50 */ 93 + __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */ 94 + __u32 tz_dsttime; /* Type of dst correction 0x5C */ 95 + __s32 wtom_clock_sec; /* Wall to monotonic clock */ 96 + __s32 wtom_clock_nsec; 97 + __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ 98 + }; 99 + 100 + #endif /* CONFIG_PPC64 */ 101 + 102 + #ifdef __KERNEL__ 103 + extern struct vdso_data *vdso_data; 104 + #endif 105 + 106 + #endif /* __ASSEMBLY__ */ 107 + 108 + #endif /* _VDSO_DATAPAGE_H */
+7 -1
include/asm-ppc/page.h
··· 1 1 #ifndef _PPC_PAGE_H 2 2 #define _PPC_PAGE_H 3 3 4 + #include <linux/config.h> 5 + #include <asm/asm-compat.h> 6 + 4 7 /* PAGE_SHIFT determines the page size */ 5 8 #define PAGE_SHIFT 12 6 - #define PAGE_SIZE (1UL << PAGE_SHIFT) 9 + #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) 7 10 8 11 /* 9 12 * Subtle: this is an int (not an unsigned long) and so it ··· 171 168 172 169 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 173 170 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 171 + 172 + /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ 173 + #define __HAVE_ARCH_GATE_AREA 1 174 174 175 175 #endif /* __KERNEL__ */ 176 176 #endif /* _PPC_PAGE_H */
include/asm-ppc64/vdso.h include/asm-powerpc/vdso.h