Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (35 commits)
x86: Add HPET force support for MCP55 (nForce 5) chipsets
x86: Force enable HPET for CK804 (nForce 4) chipsets
x86: clean up setup.h and the boot code
x86: Save registers in saved_context during suspend and hibernation
x86: merge setup_32/64.h
x86: merge signal_32/64.h
x86: merge required-features.h
x86: merge sigcontext_32/64.h
x86: merge msr_32/64.h
x86: merge mtrr_32/64.h
x86: merge statfs_32/64.h
x86: merge stat_32/64.h
x86: merge shmbuf_32/64.h
x86: merge ptrace_32/64.h
x86: merge msgbuf_32/64.h
x86: merge elf_32/64.h
x86: merge byteorder_32/64.h
x86: whitespace cleanup of mce_64.c
x86: consolidate the cpu/ related code usage
x86: prepare consolidation of cpu/ related code usage
...

+2398 -3059
+2 -1
Documentation/kernel-parameters.txt
··· 422 422 hpet= [X86-32,HPET] option to control HPET usage 423 423 Format: { enable (default) | disable | force } 424 424 disable: disable HPET and use PIT instead 425 - force: allow force enabled of undocumented chips (ICH4, VIA) 425 + force: allow force enabling of undocumented chips (ICH4, 426 + VIA, nVidia) 426 427 427 428 com20020= [HW,NET] ARCnet - COM20020 chipset 428 429 Format:
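Usage note: with the new value in place, force-enabling the HPET is a matter of appending the option to the kernel command line, e.g. (boot-loader syntax illustrative):

    kernel /boot/vmlinuz root=/dev/sda1 hpet=force

The actual force-enable work for the nForce 4/5 bridges is done by the quirks added to arch/x86/kernel/quirks.c further down in this merge.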
+2
arch/i386/Kconfig
··· 1270 1270 1271 1271 source "fs/Kconfig" 1272 1272 1273 + source "kernel/Kconfig.instrumentation" 1274 + 1273 1275 source "arch/i386/Kconfig.debug" 1274 1276 1275 1277 source "security/Kconfig"
+6
arch/i386/Makefile
··· 20 20 # Fill in SRCARCH 21 21 SRCARCH := x86 22 22 23 + # BITS is used as extension for files which are available in a 32 bit 24 + # and a 64 bit version to simplify shared Makefiles. 25 + # e.g.: obj-y += foo_$(BITS).o 26 + BITS := 32 27 + export BITS 28 + 23 29 HAS_BIARCH := $(call cc-option-yn, -m32) 24 30 ifeq ($(HAS_BIARCH),y) 25 31 AS := $(AS) --32
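The $(BITS) convention is put to use later in this same merge; for example, the consolidated mcheck Makefile below selects the word-size-specific machine check handler with a single line:

    obj-y = mce_$(BITS).o therm_throt.o

which resolves to mce_32.o when built via arch/i386 (BITS := 32 here) and to mce_64.o via arch/x86_64 (BITS := 64).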
+1 -1
arch/x86/boot/boot.h
··· 23 23 #include <linux/types.h> 24 24 #include <linux/edd.h> 25 25 #include <asm/boot.h> 26 - #include <asm/bootparam.h> 26 + #include <asm/setup.h> 27 27 28 28 /* Useful macros */ 29 29 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+2 -4
arch/x86/boot/compressed/relocs.c
··· 38 38 39 39 static int is_safe_abs_reloc(const char* sym_name) 40 40 { 41 - int i, array_size; 41 + int i; 42 42 43 - array_size = sizeof(safe_abs_relocs)/sizeof(char*); 44 - 45 - for(i = 0; i < array_size; i++) { 43 + for(i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) { 46 44 if (!strcmp(sym_name, safe_abs_relocs[i])) 47 45 /* Match found */ 48 46 return 1;
-2
arch/x86/boot/main.c
··· 26 26 * screws up the old-style command line protocol, adjust by 27 27 * filling in the new-style command line pointer instead. 28 28 */ 29 - #define OLD_CL_MAGIC 0xA33F 30 - #define OLD_CL_ADDRESS 0x20 31 29 32 30 static void copy_boot_params(void) 33 31 {
+15 -5
arch/x86/crypto/Makefile
··· 1 - ifeq ($(CONFIG_X86_32),y) 2 - include ${srctree}/arch/x86/crypto/Makefile_32 3 - else 4 - include ${srctree}/arch/x86/crypto/Makefile_64 5 - endif 1 + # 2 + # Arch-specific CryptoAPI modules. 3 + # 4 + 5 + obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o 6 + obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o 7 + 8 + obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o 9 + obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o 10 + 11 + aes-i586-y := aes-i586-asm_32.o aes_32.o 12 + twofish-i586-y := twofish-i586-asm_32.o twofish_32.o 13 + 14 + aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o 15 + twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
-12
arch/x86/crypto/Makefile_32
··· 1 - # 2 - # x86/crypto/Makefile 3 - # 4 - # Arch-specific CryptoAPI modules. 5 - # 6 - 7 - obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o 8 - obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o 9 - 10 - aes-i586-y := aes-i586-asm_32.o aes_32.o 11 - twofish-i586-y := twofish-i586-asm_32.o twofish_32.o 12 -
-12
arch/x86/crypto/Makefile_64
··· 1 - # 2 - # x86/crypto/Makefile 3 - # 4 - # Arch-specific CryptoAPI modules. 5 - # 6 - 7 - obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o 8 - obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o 9 - 10 - aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o 11 - twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o 12 -
+1 -1
arch/x86/kernel/Makefile_32
··· 26 26 obj-$(CONFIG_X86_LOCAL_APIC) += apic_32.o nmi_32.o 27 27 obj-$(CONFIG_X86_IO_APIC) += io_apic_32.o 28 28 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o 29 - obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash_32.o 29 + obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash.o 30 30 obj-$(CONFIG_CRASH_DUMP) += crash_dump_32.o 31 31 obj-$(CONFIG_X86_NUMAQ) += numaq_32.o 32 32 obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
+4 -15
arch/x86/kernel/Makefile_64
··· 9 9 x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \ 10 10 setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \ 11 11 pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \ 12 - perfctr-watchdog.o i8253.o 12 + i8253.o 13 13 14 14 obj-$(CONFIG_STACKTRACE) += stacktrace.o 15 - obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o 16 - obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o 17 - obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o 18 - obj-$(CONFIG_MTRR) += cpu/mtrr/ 19 - obj-$(CONFIG_ACPI) += acpi/ 15 + obj-y += cpu/ 16 + obj-y += acpi/ 20 17 obj-$(CONFIG_X86_MSR) += msr.o 21 18 obj-$(CONFIG_MICROCODE) += microcode.o 22 19 obj-$(CONFIG_X86_CPUID) += cpuid.o 23 20 obj-$(CONFIG_SMP) += smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o 24 21 obj-y += apic_64.o nmi_64.o 25 22 obj-y += io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o 26 - obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash_64.o 23 + obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash.o 27 24 obj-$(CONFIG_CRASH_DUMP) += crash_dump_64.o 28 25 obj-$(CONFIG_PM) += suspend_64.o 29 26 obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o 30 - obj-$(CONFIG_CPU_FREQ) += cpu/cpufreq/ 31 27 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 32 28 obj-$(CONFIG_IOMMU) += pci-gart_64.o aperture_64.o 33 29 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o ··· 38 42 obj-$(CONFIG_PCI) += early-quirks.o 39 43 40 44 obj-y += topology.o 41 - obj-y += intel_cacheinfo.o 42 - obj-y += addon_cpuid_features.o 43 45 obj-y += pcspeaker.o 44 46 45 47 CFLAGS_vsyscall_64.o := $(PROFILING) -g0 46 - 47 - therm_throt-y += cpu/mcheck/therm_throt.o 48 - intel_cacheinfo-y += cpu/intel_cacheinfo.o 49 - addon_cpuid_features-y += cpu/addon_cpuid_features.o 50 - perfctr-watchdog-y += cpu/perfctr-watchdog.o
+6 -4
arch/x86/kernel/acpi/Makefile
··· 1 - ifeq ($(CONFIG_X86_32),y) 2 - include ${srctree}/arch/x86/kernel/acpi/Makefile_32 3 - else 4 - include ${srctree}/arch/x86/kernel/acpi/Makefile_64 1 + obj-$(CONFIG_ACPI) += boot.o 2 + obj-$(CONFIG_ACPI_SLEEP) += sleep_$(BITS).o wakeup_$(BITS).o 3 + 4 + ifneq ($(CONFIG_ACPI_PROCESSOR),) 5 + obj-y += cstate.o processor.o 5 6 endif 7 +
-7
arch/x86/kernel/acpi/Makefile_32
··· 1 - obj-$(CONFIG_ACPI) += boot.o 2 - obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o 3 - 4 - ifneq ($(CONFIG_ACPI_PROCESSOR),) 5 - obj-y += cstate.o processor.o 6 - endif 7 -
-7
arch/x86/kernel/acpi/Makefile_64
··· 1 - obj-y := boot.o 2 - obj-$(CONFIG_ACPI_SLEEP) += sleep_64.o wakeup_64.o 3 - 4 - ifneq ($(CONFIG_ACPI_PROCESSOR),) 5 - obj-y += processor.o cstate.o 6 - endif 7 -
+53 -48
arch/x86/kernel/acpi/wakeup_64.S
··· 4 4 #include <asm/pgtable.h> 5 5 #include <asm/page.h> 6 6 #include <asm/msr.h> 7 + #include <asm/asm-offsets.h> 7 8 8 9 # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 9 10 # ··· 343 342 xorl %eax, %eax 344 343 call save_processor_state 345 344 346 - movq %rsp, saved_context_esp(%rip) 347 - movq %rax, saved_context_eax(%rip) 348 - movq %rbx, saved_context_ebx(%rip) 349 - movq %rcx, saved_context_ecx(%rip) 350 - movq %rdx, saved_context_edx(%rip) 351 - movq %rbp, saved_context_ebp(%rip) 352 - movq %rsi, saved_context_esi(%rip) 353 - movq %rdi, saved_context_edi(%rip) 354 - movq %r8, saved_context_r08(%rip) 355 - movq %r9, saved_context_r09(%rip) 356 - movq %r10, saved_context_r10(%rip) 357 - movq %r11, saved_context_r11(%rip) 358 - movq %r12, saved_context_r12(%rip) 359 - movq %r13, saved_context_r13(%rip) 360 - movq %r14, saved_context_r14(%rip) 361 - movq %r15, saved_context_r15(%rip) 362 - pushfq ; popq saved_context_eflags(%rip) 345 + movq $saved_context, %rax 346 + movq %rsp, pt_regs_rsp(%rax) 347 + movq %rbp, pt_regs_rbp(%rax) 348 + movq %rsi, pt_regs_rsi(%rax) 349 + movq %rdi, pt_regs_rdi(%rax) 350 + movq %rbx, pt_regs_rbx(%rax) 351 + movq %rcx, pt_regs_rcx(%rax) 352 + movq %rdx, pt_regs_rdx(%rax) 353 + movq %r8, pt_regs_r8(%rax) 354 + movq %r9, pt_regs_r9(%rax) 355 + movq %r10, pt_regs_r10(%rax) 356 + movq %r11, pt_regs_r11(%rax) 357 + movq %r12, pt_regs_r12(%rax) 358 + movq %r13, pt_regs_r13(%rax) 359 + movq %r14, pt_regs_r14(%rax) 360 + movq %r15, pt_regs_r15(%rax) 361 + pushfq 362 + popq pt_regs_eflags(%rax) 363 363 364 364 movq $.L97, saved_rip(%rip) 365 365 366 - movq %rsp,saved_rsp 367 - movq %rbp,saved_rbp 368 - movq %rbx,saved_rbx 369 - movq %rdi,saved_rdi 370 - movq %rsi,saved_rsi 366 + movq %rsp, saved_rsp 367 + movq %rbp, saved_rbp 368 + movq %rbx, saved_rbx 369 + movq %rdi, saved_rdi 370 + movq %rsi, saved_rsi 371 371 372 372 addq $8, %rsp 373 373 movl $3, %edi ··· 379 377 .L99: 380 378 .align 4 381 379 movl $24, %eax 382 - movw %ax, %ds 383 - movq saved_context+58(%rip), %rax 384 - movq %rax, %cr4 385 - movq saved_context+50(%rip), %rax 386 - movq %rax, %cr3 387 - movq saved_context+42(%rip), %rax 388 - movq %rax, %cr2 389 - movq saved_context+34(%rip), %rax 390 - movq %rax, %cr0 391 - pushq saved_context_eflags(%rip) ; popfq 392 - movq saved_context_esp(%rip), %rsp 393 - movq saved_context_ebp(%rip), %rbp 394 - movq saved_context_eax(%rip), %rax 395 - movq saved_context_ebx(%rip), %rbx 396 - movq saved_context_ecx(%rip), %rcx 397 - movq saved_context_edx(%rip), %rdx 398 - movq saved_context_esi(%rip), %rsi 399 - movq saved_context_edi(%rip), %rdi 400 - movq saved_context_r08(%rip), %r8 401 - movq saved_context_r09(%rip), %r9 402 - movq saved_context_r10(%rip), %r10 403 - movq saved_context_r11(%rip), %r11 404 - movq saved_context_r12(%rip), %r12 405 - movq saved_context_r13(%rip), %r13 406 - movq saved_context_r14(%rip), %r14 407 - movq saved_context_r15(%rip), %r15 380 + movw %ax, %ds 381 + 382 + /* We don't restore %rax, it must be 0 anyway */ 383 + movq $saved_context, %rax 384 + movq saved_context_cr4(%rax), %rbx 385 + movq %rbx, %cr4 386 + movq saved_context_cr3(%rax), %rbx 387 + movq %rbx, %cr3 388 + movq saved_context_cr2(%rax), %rbx 389 + movq %rbx, %cr2 390 + movq saved_context_cr0(%rax), %rbx 391 + movq %rbx, %cr0 392 + pushq pt_regs_eflags(%rax) 393 + popfq 394 + movq pt_regs_rsp(%rax), %rsp 395 + movq pt_regs_rbp(%rax), %rbp 396 + movq pt_regs_rsi(%rax), %rsi 397 + movq pt_regs_rdi(%rax), %rdi 398 + movq pt_regs_rbx(%rax), %rbx 
399 + movq pt_regs_rcx(%rax), %rcx 400 + movq pt_regs_rdx(%rax), %rdx 401 + movq pt_regs_r8(%rax), %r8 402 + movq pt_regs_r9(%rax), %r9 403 + movq pt_regs_r10(%rax), %r10 404 + movq pt_regs_r11(%rax), %r11 405 + movq pt_regs_r12(%rax), %r12 406 + movq pt_regs_r13(%rax), %r13 407 + movq pt_regs_r14(%rax), %r14 408 + movq pt_regs_r15(%rax), %r15 408 409 409 410 xorl %eax, %eax 410 411 addq $8, %rsp
+14
arch/x86/kernel/apic_64.c
··· 287 287 apic_write(APIC_SPIV, value); 288 288 } 289 289 290 + void lapic_shutdown(void) 291 + { 292 + unsigned long flags; 293 + 294 + if (!cpu_has_apic) 295 + return; 296 + 297 + local_irq_save(flags); 298 + 299 + disable_local_APIC(); 300 + 301 + local_irq_restore(flags); 302 + } 303 + 290 304 /* 291 305 * This is to verify that we're looking at a real local APIC. 292 306 * Check these against your board if the CPUs aren't getting
+28
arch/x86/kernel/asm-offsets_64.c
··· 76 76 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); 77 77 DEFINE(pbe_next, offsetof(struct pbe, next)); 78 78 BLANK(); 79 + #define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry)) 80 + ENTRY(rbx); 82 + ENTRY(rcx); 83 + ENTRY(rdx); 84 + ENTRY(rsp); 85 + ENTRY(rbp); 86 + ENTRY(rsi); 87 + ENTRY(rdi); 88 + ENTRY(r8); 89 + ENTRY(r9); 90 + ENTRY(r10); 91 + ENTRY(r11); 92 + ENTRY(r12); 93 + ENTRY(r13); 94 + ENTRY(r14); 95 + ENTRY(r15); 96 + ENTRY(eflags); 97 + BLANK(); 98 + #undef ENTRY 99 + #define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry)) 100 + ENTRY(cr0); 101 + ENTRY(cr2); 102 + ENTRY(cr3); 103 + ENTRY(cr4); 104 + ENTRY(cr8); 105 + BLANK(); 106 + #undef ENTRY 79 107 DEFINE(TSS_ist, offsetof(struct tss_struct, ist)); 80 108 BLANK(); 81 109 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
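Background, for readers new to asm-offsets: Kbuild compiles this file to assembly and post-processes each DEFINE() marker into a plain #define in the generated asm-offsets.h, which is what lets wakeup_64.S and suspend_asm_64.S refer to struct pt_regs fields symbolically. Illustrative output (the numeric offsets are assumptions about the pt_regs layout, not taken from this patch):

    #define pt_regs_rbx 40       /* offsetof(struct pt_regs, rbx) */
    #define pt_regs_eflags 144   /* offsetof(struct pt_regs, eflags) */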
+12 -12
arch/x86/kernel/cpu/Makefile
··· 2 2 # Makefile for x86-compatible CPU details and quirks 3 3 # 4 4 5 - obj-y := common.o proc.o bugs.o 5 + obj-y := intel_cacheinfo.o addon_cpuid_features.o 6 6 7 - obj-y += amd.o 8 - obj-y += cyrix.o 9 - obj-y += centaur.o 10 - obj-y += transmeta.o 11 - obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o 12 - obj-y += nexgen.o 13 - obj-y += umc.o 7 + obj-$(CONFIG_X86_32) += common.o proc.o bugs.o 8 + obj-$(CONFIG_X86_32) += amd.o 9 + obj-$(CONFIG_X86_32) += cyrix.o 10 + obj-$(CONFIG_X86_32) += centaur.o 11 + obj-$(CONFIG_X86_32) += transmeta.o 12 + obj-$(CONFIG_X86_32) += intel.o 13 + obj-$(CONFIG_X86_32) += nexgen.o 14 + obj-$(CONFIG_X86_32) += umc.o 14 15 15 - obj-$(CONFIG_X86_MCE) += mcheck/ 16 - 17 - obj-$(CONFIG_MTRR) += mtrr/ 18 - obj-$(CONFIG_CPU_FREQ) += cpufreq/ 16 + obj-$(CONFIG_X86_MCE) += mcheck/ 17 + obj-$(CONFIG_MTRR) += mtrr/ 18 + obj-$(CONFIG_CPU_FREQ) += cpufreq/ 19 19 20 20 obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
+6 -2
arch/x86/kernel/cpu/mcheck/Makefile
··· 1 - obj-y = mce.o k7.o p4.o p5.o p6.o winchip.o therm_throt.o 2 - obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o 1 + obj-y = mce_$(BITS).o therm_throt.o 2 + 3 + obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o 4 + obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o 5 + obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o 6 + obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
arch/x86/kernel/cpu/mcheck/mce.c arch/x86/kernel/cpu/mcheck/mce_32.c
-137
arch/x86/kernel/crash_32.c
··· 1 - /* 2 - * Architecture specific (i386) functions for kexec based crash dumps. 3 - * 4 - * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) 5 - * 6 - * Copyright (C) IBM Corporation, 2004. All rights reserved. 7 - * 8 - */ 9 - 10 - #include <linux/init.h> 11 - #include <linux/types.h> 12 - #include <linux/kernel.h> 13 - #include <linux/smp.h> 14 - #include <linux/reboot.h> 15 - #include <linux/kexec.h> 16 - #include <linux/delay.h> 17 - #include <linux/elf.h> 18 - #include <linux/elfcore.h> 19 - 20 - #include <asm/processor.h> 21 - #include <asm/hardirq.h> 22 - #include <asm/nmi.h> 23 - #include <asm/hw_irq.h> 24 - #include <asm/apic.h> 25 - #include <linux/kdebug.h> 26 - #include <asm/smp.h> 27 - 28 - #include <mach_ipi.h> 29 - 30 - 31 - /* This keeps a track of which one is crashing cpu. */ 32 - static int crashing_cpu; 33 - 34 - #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 35 - static atomic_t waiting_for_crash_ipi; 36 - 37 - static int crash_nmi_callback(struct notifier_block *self, 38 - unsigned long val, void *data) 39 - { 40 - struct pt_regs *regs; 41 - struct pt_regs fixed_regs; 42 - int cpu; 43 - 44 - if (val != DIE_NMI_IPI) 45 - return NOTIFY_OK; 46 - 47 - regs = ((struct die_args *)data)->regs; 48 - cpu = raw_smp_processor_id(); 49 - 50 - /* Don't do anything if this handler is invoked on crashing cpu. 51 - * Otherwise, system will completely hang. Crashing cpu can get 52 - * an NMI if system was initially booted with nmi_watchdog parameter. 53 - */ 54 - if (cpu == crashing_cpu) 55 - return NOTIFY_STOP; 56 - local_irq_disable(); 57 - 58 - if (!user_mode_vm(regs)) { 59 - crash_fixup_ss_esp(&fixed_regs, regs); 60 - regs = &fixed_regs; 61 - } 62 - crash_save_cpu(regs, cpu); 63 - disable_local_APIC(); 64 - atomic_dec(&waiting_for_crash_ipi); 65 - /* Assume hlt works */ 66 - halt(); 67 - for (;;) 68 - cpu_relax(); 69 - 70 - return 1; 71 - } 72 - 73 - static void smp_send_nmi_allbutself(void) 74 - { 75 - cpumask_t mask = cpu_online_map; 76 - cpu_clear(safe_smp_processor_id(), mask); 77 - if (!cpus_empty(mask)) 78 - send_IPI_mask(mask, NMI_VECTOR); 79 - } 80 - 81 - static struct notifier_block crash_nmi_nb = { 82 - .notifier_call = crash_nmi_callback, 83 - }; 84 - 85 - static void nmi_shootdown_cpus(void) 86 - { 87 - unsigned long msecs; 88 - 89 - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 90 - /* Would it be better to replace the trap vector here? */ 91 - if (register_die_notifier(&crash_nmi_nb)) 92 - return; /* return what? */ 93 - /* Ensure the new callback function is set before sending 94 - * out the NMI 95 - */ 96 - wmb(); 97 - 98 - smp_send_nmi_allbutself(); 99 - 100 - msecs = 1000; /* Wait at most a second for the other cpus to stop */ 101 - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { 102 - mdelay(1); 103 - msecs--; 104 - } 105 - 106 - /* Leave the nmi callback set */ 107 - disable_local_APIC(); 108 - } 109 - #else 110 - static void nmi_shootdown_cpus(void) 111 - { 112 - /* There are no cpus to shootdown */ 113 - } 114 - #endif 115 - 116 - void machine_crash_shutdown(struct pt_regs *regs) 117 - { 118 - /* This function is only called after the system 119 - * has panicked or is otherwise in a critical state. 120 - * The minimum amount of code to allow a kexec'd kernel 121 - * to run successfully needs to happen here. 122 - * 123 - * In practice this means shooting down the other cpus in 124 - * an SMP system. 
125 - */ 126 - /* The kernel is broken so disable interrupts */ 127 - local_irq_disable(); 128 - 129 - /* Make a note of crashing cpu. Will be used in NMI callback.*/ 130 - crashing_cpu = safe_smp_processor_id(); 131 - nmi_shootdown_cpus(); 132 - lapic_shutdown(); 133 - #if defined(CONFIG_X86_IO_APIC) 134 - disable_IO_APIC(); 135 - #endif 136 - crash_save_cpu(regs, safe_smp_processor_id()); 137 - }
+38 -29
arch/x86/kernel/crash_64.c arch/x86/kernel/crash.c
··· 1 1 /* 2 - * Architecture specific (x86_64) functions for kexec based crash dumps. 2 + * Architecture specific (i386/x86_64) functions for kexec based crash dumps. 3 3 * 4 4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) 5 5 * ··· 11 11 #include <linux/types.h> 12 12 #include <linux/kernel.h> 13 13 #include <linux/smp.h> 14 - #include <linux/irq.h> 15 14 #include <linux/reboot.h> 16 15 #include <linux/kexec.h> 17 16 #include <linux/delay.h> 18 17 #include <linux/elf.h> 19 18 #include <linux/elfcore.h> 20 - #include <linux/kdebug.h> 21 19 22 20 #include <asm/processor.h> 23 21 #include <asm/hardirq.h> 24 22 #include <asm/nmi.h> 25 23 #include <asm/hw_irq.h> 24 + #include <asm/apic.h> 25 + #include <linux/kdebug.h> 26 + #include <asm/smp.h> 27 + 28 + #ifdef CONFIG_X86_32 29 + #include <mach_ipi.h> 30 + #else 26 31 #include <asm/mach_apic.h> 32 + #endif 27 33 28 34 /* This keeps a track of which one is crashing cpu. */ 29 35 static int crashing_cpu; 30 36 31 - #ifdef CONFIG_SMP 37 + #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 32 38 static atomic_t waiting_for_crash_ipi; 33 39 34 40 static int crash_nmi_callback(struct notifier_block *self, 35 - unsigned long val, void *data) 41 + unsigned long val, void *data) 36 42 { 37 43 struct pt_regs *regs; 44 + #ifdef CONFIG_X86_32 45 + struct pt_regs fixed_regs; 46 + #endif 38 47 int cpu; 39 48 40 49 if (val != DIE_NMI_IPI) ··· 52 43 regs = ((struct die_args *)data)->regs; 53 44 cpu = raw_smp_processor_id(); 54 45 55 - /* 56 - * Don't do anything if this handler is invoked on crashing cpu. 46 + /* Don't do anything if this handler is invoked on crashing cpu. 57 47 * Otherwise, system will completely hang. Crashing cpu can get 58 48 * an NMI if system was initially booted with nmi_watchdog parameter. 59 49 */ ··· 60 52 return NOTIFY_STOP; 61 53 local_irq_disable(); 62 54 55 + #ifdef CONFIG_X86_32 56 + if (!user_mode_vm(regs)) { 57 + crash_fixup_ss_esp(&fixed_regs, regs); 58 + regs = &fixed_regs; 59 + } 60 + #endif 63 61 crash_save_cpu(regs, cpu); 64 62 disable_local_APIC(); 65 63 atomic_dec(&waiting_for_crash_ipi); 66 64 /* Assume hlt works */ 67 - for(;;) 68 - halt(); 65 + halt(); 66 + for (;;) 67 + cpu_relax(); 69 68 70 69 return 1; 71 70 } 72 71 73 72 static void smp_send_nmi_allbutself(void) 74 73 { 75 - send_IPI_allbutself(NMI_VECTOR); 74 + cpumask_t mask = cpu_online_map; 75 + cpu_clear(safe_smp_processor_id(), mask); 76 + if (!cpus_empty(mask)) 77 + send_IPI_mask(mask, NMI_VECTOR); 76 78 } 77 - 78 - /* 79 - * This code is a best effort heuristic to get the 80 - * other cpus to stop executing. So races with 81 - * cpu hotplug shouldn't matter. 82 - */ 83 79 84 80 static struct notifier_block crash_nmi_nb = { 85 81 .notifier_call = crash_nmi_callback, ··· 94 82 unsigned long msecs; 95 83 96 84 atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 85 + /* Would it be better to replace the trap vector here? */ 97 86 if (register_die_notifier(&crash_nmi_nb)) 98 - return; /* return what? */ 99 - 100 - /* 101 - * Ensure the new callback function is set before sending 87 + return; /* return what? 
*/ 88 + /* Ensure the new callback function is set before sending 102 89 * out the NMI 103 90 */ 104 91 wmb(); ··· 109 98 mdelay(1); 110 99 msecs--; 111 100 } 101 + 112 102 /* Leave the nmi callback set */ 113 103 disable_local_APIC(); 114 104 } ··· 122 110 123 111 void machine_crash_shutdown(struct pt_regs *regs) 124 112 { 125 - /* 126 - * This function is only called after the system 113 + /* This function is only called after the system 127 114 * has panicked or is otherwise in a critical state. 128 115 * The minimum amount of code to allow a kexec'd kernel 129 116 * to run successfully needs to happen here. ··· 134 123 local_irq_disable(); 135 124 136 125 /* Make a note of crashing cpu. Will be used in NMI callback.*/ 137 - crashing_cpu = smp_processor_id(); 126 + crashing_cpu = safe_smp_processor_id(); 138 127 nmi_shootdown_cpus(); 139 - 140 - if(cpu_has_apic) 141 - disable_local_APIC(); 142 - 128 + lapic_shutdown(); 129 + #if defined(CONFIG_X86_IO_APIC) 143 130 disable_IO_APIC(); 144 - 145 - crash_save_cpu(regs, smp_processor_id()); 131 + #endif 132 + crash_save_cpu(regs, safe_smp_processor_id()); 146 133 }
+1 -6
arch/x86/kernel/head_32.S
··· 124 124 movsl 125 125 movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi 126 126 andl %esi,%esi 127 - jnz 2f # New command line protocol 128 - cmpw $(OLD_CL_MAGIC),OLD_CL_MAGIC_ADDR 129 - jne 1f 130 - movzwl OLD_CL_OFFSET,%esi 131 - addl $(OLD_CL_BASE_ADDR),%esi 132 - 2: 127 + jz 1f # No command line 133 128 movl $(boot_command_line - __PAGE_OFFSET),%edi 134 129 movl $(COMMAND_LINE_SIZE/4),%ecx 135 130 rep
+87 -79
arch/x86/kernel/mce_64.c arch/x86/kernel/cpu/mcheck/mce_64.c
··· 1 1 /* 2 2 * Machine check handler. 3 3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 4 - * Rest from unknown author(s). 5 - * 2004 Andi Kleen. Rewrote most of it. 4 + * Rest from unknown author(s). 5 + * 2004 Andi Kleen. Rewrote most of it. 6 6 */ 7 7 8 8 #include <linux/init.h> ··· 23 23 #include <linux/ctype.h> 24 24 #include <linux/kmod.h> 25 25 #include <linux/kdebug.h> 26 - #include <asm/processor.h> 26 + #include <asm/processor.h> 27 27 #include <asm/msr.h> 28 28 #include <asm/mce.h> 29 29 #include <asm/uaccess.h> ··· 63 63 * separate MCEs from kernel messages to avoid bogus bug reports. 64 64 */ 65 65 66 - struct mce_log mcelog = { 66 + struct mce_log mcelog = { 67 67 MCE_LOG_SIGNATURE, 68 68 MCE_LOG_LEN, 69 - }; 69 + }; 70 70 71 71 void mce_log(struct mce *mce) 72 72 { ··· 111 111 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", 112 112 m->cpu, m->mcgstatus, m->bank, m->status); 113 113 if (m->rip) { 114 - printk(KERN_EMERG 115 - "RIP%s %02x:<%016Lx> ", 114 + printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", 116 115 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 117 116 m->cs, m->rip); 118 117 if (m->cs == __KERNEL_CS) 119 118 print_symbol("{%s}", m->rip); 120 119 printk("\n"); 121 120 } 122 - printk(KERN_EMERG "TSC %Lx ", m->tsc); 121 + printk(KERN_EMERG "TSC %Lx ", m->tsc); 123 122 if (m->addr) 124 123 printk("ADDR %Lx ", m->addr); 125 124 if (m->misc) 126 - printk("MISC %Lx ", m->misc); 125 + printk("MISC %Lx ", m->misc); 127 126 printk("\n"); 128 127 printk(KERN_EMERG "This is not a software problem!\n"); 129 - printk(KERN_EMERG 130 - "Run through mcelog --ascii to decode and contact your hardware vendor\n"); 128 + printk(KERN_EMERG "Run through mcelog --ascii to decode " 129 + "and contact your hardware vendor\n"); 131 130 } 132 131 133 132 static void mce_panic(char *msg, struct mce *backup, unsigned long start) 134 - { 133 + { 135 134 int i; 136 135 137 136 oops_begin(); 138 137 for (i = 0; i < MCE_LOG_LEN; i++) { 139 138 unsigned long tsc = mcelog.entry[i].tsc; 139 + 140 140 if (time_before(tsc, start)) 141 141 continue; 142 - print_mce(&mcelog.entry[i]); 142 + print_mce(&mcelog.entry[i]); 143 143 if (backup && mcelog.entry[i].tsc == backup->tsc) 144 144 backup = NULL; 145 145 } 146 146 if (backup) 147 147 print_mce(backup); 148 148 panic(msg); 149 - } 149 + } 150 150 151 151 static int mce_available(struct cpuinfo_x86 *c) 152 152 { ··· 170 170 } 171 171 } 172 172 173 - /* 173 + /* 174 174 * The actual machine check handler 175 175 */ 176 - 177 176 void do_machine_check(struct pt_regs * regs, long error_code) 178 177 { 179 178 struct mce m, panicm; ··· 193 194 atomic_inc(&mce_entry); 194 195 195 196 if (regs) 196 - notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL); 197 + notify_die(DIE_NMI, "machine check", regs, error_code, 18, 198 + SIGKILL); 197 199 if (!banks) 198 200 goto out2; 199 201 ··· 204 204 /* if the restart IP is not valid, we're done for */ 205 205 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 206 206 no_way_out = 1; 207 - 207 + 208 208 rdtscll(mcestart); 209 209 barrier(); 210 210 211 211 for (i = 0; i < banks; i++) { 212 212 if (!bank[i]) 213 213 continue; 214 - 215 - m.misc = 0; 214 + 215 + m.misc = 0; 216 216 m.addr = 0; 217 217 m.bank = i; 218 218 m.tsc = 0; ··· 372 372 if (mce_notify_user()) { 373 373 next_interval = max(next_interval/2, HZ/100); 374 374 } else { 375 - next_interval = min(next_interval*2, 375 + next_interval = min(next_interval * 2, 376 376 (int)round_jiffies_relative(check_interval*HZ)); 377 377 } 378 
378 ··· 423 423 }; 424 424 425 425 static __init int periodic_mcheck_init(void) 426 - { 426 + { 427 427 next_interval = check_interval * HZ; 428 428 if (next_interval) 429 429 schedule_delayed_work(&mcheck_work, 430 430 round_jiffies_relative(next_interval)); 431 431 idle_notifier_register(&mce_idle_notifier); 432 432 return 0; 433 - } 433 + } 434 434 __initcall(periodic_mcheck_init); 435 435 436 436 437 - /* 437 + /* 438 438 * Initialize Machine Checks for a CPU. 439 439 */ 440 440 static void mce_init(void *dummy) ··· 444 444 445 445 rdmsrl(MSR_IA32_MCG_CAP, cap); 446 446 banks = cap & 0xff; 447 - if (banks > NR_BANKS) { 447 + if (banks > NR_BANKS) { 448 448 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks); 449 - banks = NR_BANKS; 449 + banks = NR_BANKS; 450 450 } 451 451 /* Use accurate RIP reporting if available. */ 452 452 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) ··· 464 464 for (i = 0; i < banks; i++) { 465 465 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); 466 466 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 467 - } 467 + } 468 468 } 469 469 470 470 /* Add per CPU specific workarounds here */ 471 471 static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 472 - { 472 + { 473 473 /* This should be disabled by the BIOS, but isn't always */ 474 474 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { 475 - /* disable GART TBL walk error reporting, which trips off 475 + /* disable GART TBL walk error reporting, which trips off 476 476 incorrectly with the IOMMU & 3ware & Cerberus. */ 477 477 clear_bit(10, &bank[4]); 478 478 /* Lots of broken BIOS around that don't clear them ··· 480 480 mce_bootlog = 0; 481 481 } 482 482 483 - } 483 + } 484 484 485 485 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) 486 486 { ··· 496 496 } 497 497 } 498 498 499 - /* 499 + /* 500 500 * Called for each booted CPU to set up machine checks. 501 - * Must be called with preempt off. 501 + * Must be called with preempt off. 
502 502 */ 503 503 void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 504 504 { 505 505 static cpumask_t mce_cpus = CPU_MASK_NONE; 506 506 507 - mce_cpu_quirks(c); 507 + mce_cpu_quirks(c); 508 508 509 509 if (mce_dont_init || 510 510 cpu_test_and_set(smp_processor_id(), mce_cpus) || ··· 553 553 return 0; 554 554 } 555 555 556 - static void collect_tscs(void *data) 557 - { 556 + static void collect_tscs(void *data) 557 + { 558 558 unsigned long *cpu_tsc = (unsigned long *)data; 559 - rdtscll(cpu_tsc[smp_processor_id()]); 560 - } 561 559 562 - static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) 560 + rdtscll(cpu_tsc[smp_processor_id()]); 561 + } 562 + 563 + static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, 564 + loff_t *off) 563 565 { 564 566 unsigned long *cpu_tsc; 565 567 static DECLARE_MUTEX(mce_read_sem); ··· 573 571 if (!cpu_tsc) 574 572 return -ENOMEM; 575 573 576 - down(&mce_read_sem); 574 + down(&mce_read_sem); 577 575 next = rcu_dereference(mcelog.next); 578 576 579 577 /* Only supports full reads right now */ 580 - if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 578 + if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 581 579 up(&mce_read_sem); 582 580 kfree(cpu_tsc); 583 581 return -EINVAL; 584 582 } 585 583 586 584 err = 0; 587 - for (i = 0; i < next; i++) { 585 + for (i = 0; i < next; i++) { 588 586 unsigned long start = jiffies; 587 + 589 588 while (!mcelog.entry[i].finished) { 590 589 if (time_after_eq(jiffies, start + 2)) { 591 590 memset(mcelog.entry + i,0, sizeof(struct mce)); ··· 596 593 } 597 594 smp_rmb(); 598 595 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); 599 - buf += sizeof(struct mce); 596 + buf += sizeof(struct mce); 600 597 timeout: 601 598 ; 602 - } 599 + } 603 600 604 601 memset(mcelog.entry, 0, next * sizeof(struct mce)); 605 602 mcelog.next = 0; 606 603 607 604 synchronize_sched(); 608 605 609 - /* Collect entries that were still getting written before the synchronize. */ 610 - 606 + /* 607 + * Collect entries that were still getting written before the 608 + * synchronize. 609 + */ 611 610 on_each_cpu(collect_tscs, cpu_tsc, 1, 1); 612 - for (i = next; i < MCE_LOG_LEN; i++) { 613 - if (mcelog.entry[i].finished && 614 - mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 615 - err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce)); 611 + for (i = next; i < MCE_LOG_LEN; i++) { 612 + if (mcelog.entry[i].finished && 613 + mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { 614 + err |= copy_to_user(buf, mcelog.entry+i, 615 + sizeof(struct mce)); 616 616 smp_rmb(); 617 617 buf += sizeof(struct mce); 618 618 memset(&mcelog.entry[i], 0, sizeof(struct mce)); 619 619 } 620 - } 620 + } 621 621 up(&mce_read_sem); 622 622 kfree(cpu_tsc); 623 - return err ? -EFAULT : buf - ubuf; 623 + return err ? 
-EFAULT : buf - ubuf; 624 624 } 625 625 626 626 static unsigned int mce_poll(struct file *file, poll_table *wait) ··· 634 628 return 0; 635 629 } 636 630 637 - static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) 631 + static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, 632 + unsigned long arg) 638 633 { 639 634 int __user *p = (int __user *)arg; 635 + 640 636 if (!capable(CAP_SYS_ADMIN)) 641 - return -EPERM; 637 + return -EPERM; 642 638 switch (cmd) { 643 - case MCE_GET_RECORD_LEN: 639 + case MCE_GET_RECORD_LEN: 644 640 return put_user(sizeof(struct mce), p); 645 641 case MCE_GET_LOG_LEN: 646 - return put_user(MCE_LOG_LEN, p); 642 + return put_user(MCE_LOG_LEN, p); 647 643 case MCE_GETCLEAR_FLAGS: { 648 644 unsigned flags; 649 - do { 645 + 646 + do { 650 647 flags = mcelog.flags; 651 - } while (cmpxchg(&mcelog.flags, flags, 0) != flags); 652 - return put_user(flags, p); 648 + } while (cmpxchg(&mcelog.flags, flags, 0) != flags); 649 + return put_user(flags, p); 653 650 } 654 651 default: 655 - return -ENOTTY; 656 - } 652 + return -ENOTTY; 653 + } 657 654 } 658 655 659 656 static const struct file_operations mce_chrdev_ops = { ··· 687 678 set_in_cr4(X86_CR4_MCE); 688 679 } 689 680 690 - /* 691 - * Old style boot options parsing. Only for compatibility. 681 + /* 682 + * Old style boot options parsing. Only for compatibility. 692 683 */ 693 - 694 684 static int __init mcheck_disable(char *str) 695 685 { 696 686 mce_dont_init = 1; ··· 710 702 else if (isdigit(str[0])) 711 703 get_option(&str, &tolerant); 712 704 else 713 - printk("mce= argument %s ignored. Please use /sys", str); 705 + printk("mce= argument %s ignored. Please use /sys", str); 714 706 return 1; 715 707 } 716 708 717 709 __setup("nomce", mcheck_disable); 718 710 __setup("mce=", mcheck_enable); 719 711 720 - /* 712 + /* 721 713 * Sysfs support 722 - */ 714 + */ 723 715 724 716 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. 725 717 Only one CPU is active at this time, the others get readded later using ··· 731 723 } 732 724 733 725 /* Reinit MCEs after user configuration changes */ 734 - static void mce_restart(void) 735 - { 726 + static void mce_restart(void) 727 + { 736 728 if (next_interval) 737 729 cancel_delayed_work(&mcheck_work); 738 730 /* Timer race is harmless here */ 739 - on_each_cpu(mce_init, NULL, 1, 1); 731 + on_each_cpu(mce_init, NULL, 1, 1); 740 732 next_interval = check_interval * HZ; 741 733 if (next_interval) 742 734 schedule_delayed_work(&mcheck_work, ··· 752 744 753 745 /* Why are there no generic functions for this? 
*/ 754 746 #define ACCESSOR(name, var, start) \ 755 - static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ 756 - return sprintf(buf, "%lx\n", (unsigned long)var); \ 757 - } \ 747 + static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ 748 + return sprintf(buf, "%lx\n", (unsigned long)var); \ 749 + } \ 758 750 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ 759 - char *end; \ 760 - unsigned long new = simple_strtoul(buf, &end, 0); \ 761 - if (end == buf) return -EINVAL; \ 762 - var = new; \ 763 - start; \ 764 - return end-buf; \ 765 - } \ 751 + char *end; \ 752 + unsigned long new = simple_strtoul(buf, &end, 0); \ 753 + if (end == buf) return -EINVAL; \ 754 + var = new; \ 755 + start; \ 756 + return end-buf; \ 757 + } \ 766 758 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); 767 759 768 760 /* TBD should generate these dynamically based on number of available banks */
arch/x86/kernel/mce_amd_64.c arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/mce_intel_64.c arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+54 -1
arch/x86/kernel/quirks.c
··· 60 60 NONE_FORCE_HPET_RESUME, 61 61 OLD_ICH_FORCE_HPET_RESUME, 62 62 ICH_FORCE_HPET_RESUME, 63 - VT8237_FORCE_HPET_RESUME 63 + VT8237_FORCE_HPET_RESUME, 64 + NVIDIA_FORCE_HPET_RESUME, 64 65 } force_hpet_resume_type; 65 66 66 67 static void __iomem *rcba_base; ··· 322 321 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, 323 322 vt8237_force_enable_hpet); 324 323 324 + /* 325 + * Undocumented chipset feature taken from LinuxBIOS. 326 + */ 327 + static void nvidia_force_hpet_resume(void) 328 + { 329 + pci_write_config_dword(cached_dev, 0x44, 0xfed00001); 330 + printk(KERN_DEBUG "Force enabled HPET at resume\n"); 331 + } 332 + 333 + static void nvidia_force_enable_hpet(struct pci_dev *dev) 334 + { 335 + u32 uninitialized_var(val); 336 + 337 + if (!hpet_force_user || hpet_address || force_hpet_address) 338 + return; 339 + 340 + pci_write_config_dword(dev, 0x44, 0xfed00001); 341 + pci_read_config_dword(dev, 0x44, &val); 342 + force_hpet_address = val & 0xfffffffe; 343 + force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME; 344 + printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", 345 + force_hpet_address); 346 + cached_dev = dev; 347 + return; 348 + } 349 + 350 + /* ISA Bridges */ 351 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050, 352 + nvidia_force_enable_hpet); 353 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051, 354 + nvidia_force_enable_hpet); 355 + 356 + /* LPC bridges */ 357 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360, 358 + nvidia_force_enable_hpet); 359 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361, 360 + nvidia_force_enable_hpet); 361 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362, 362 + nvidia_force_enable_hpet); 363 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363, 364 + nvidia_force_enable_hpet); 365 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364, 366 + nvidia_force_enable_hpet); 367 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365, 368 + nvidia_force_enable_hpet); 369 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366, 370 + nvidia_force_enable_hpet); 371 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367, 372 + nvidia_force_enable_hpet); 325 373 326 374 void force_hpet_resume(void) 327 375 { ··· 383 333 384 334 case VT8237_FORCE_HPET_RESUME: 385 335 return vt8237_force_hpet_resume(); 336 + 337 + case NVIDIA_FORCE_HPET_RESUME: 338 + return nvidia_force_hpet_resume(); 386 339 387 340 default: 388 341 break;
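A note on the magic constants above (the bit-0 reading is inferred from the code and its LinuxBIOS origin, not from nVidia documentation): 0xfed00001 written to config offset 0x44 is the conventional HPET MMIO base 0xfed00000 with what appears to be an enable bit in bit 0; the mask in

    force_hpet_address = val & 0xfffffffe;

strips that low bit so only the base address is reported and used.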
+1 -1
arch/x86/kernel/smpboot_64.c
··· 388 388 389 389 printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); 390 390 391 - for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) { 391 + for (i = 0; i < ARRAY_SIZE(regs); i++) { 392 392 printk("... APIC #%d %s: ", apicid, names[i]); 393 393 394 394 /*
-6
arch/x86/kernel/suspend_64.c
··· 19 19 20 20 struct saved_context saved_context; 21 21 22 - unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx; 23 - unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi; 24 - unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11; 25 - unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15; 26 - unsigned long saved_context_eflags; 27 - 28 22 void __save_processor_state(struct saved_context *ctxt) 29 23 { 30 24 kernel_fpu_begin();
+37 -35
arch/x86/kernel/suspend_asm_64.S
··· 17 17 #include <asm/asm-offsets.h> 18 18 19 19 ENTRY(swsusp_arch_suspend) 20 - 21 - movq %rsp, saved_context_esp(%rip) 22 - movq %rax, saved_context_eax(%rip) 23 - movq %rbx, saved_context_ebx(%rip) 24 - movq %rcx, saved_context_ecx(%rip) 25 - movq %rdx, saved_context_edx(%rip) 26 - movq %rbp, saved_context_ebp(%rip) 27 - movq %rsi, saved_context_esi(%rip) 28 - movq %rdi, saved_context_edi(%rip) 29 - movq %r8, saved_context_r08(%rip) 30 - movq %r9, saved_context_r09(%rip) 31 - movq %r10, saved_context_r10(%rip) 32 - movq %r11, saved_context_r11(%rip) 33 - movq %r12, saved_context_r12(%rip) 34 - movq %r13, saved_context_r13(%rip) 35 - movq %r14, saved_context_r14(%rip) 36 - movq %r15, saved_context_r15(%rip) 37 - pushfq ; popq saved_context_eflags(%rip) 20 + movq $saved_context, %rax 21 + movq %rsp, pt_regs_rsp(%rax) 22 + movq %rbp, pt_regs_rbp(%rax) 23 + movq %rsi, pt_regs_rsi(%rax) 24 + movq %rdi, pt_regs_rdi(%rax) 25 + movq %rbx, pt_regs_rbx(%rax) 26 + movq %rcx, pt_regs_rcx(%rax) 27 + movq %rdx, pt_regs_rdx(%rax) 28 + movq %r8, pt_regs_r8(%rax) 29 + movq %r9, pt_regs_r9(%rax) 30 + movq %r10, pt_regs_r10(%rax) 31 + movq %r11, pt_regs_r11(%rax) 32 + movq %r12, pt_regs_r12(%rax) 33 + movq %r13, pt_regs_r13(%rax) 34 + movq %r14, pt_regs_r14(%rax) 35 + movq %r15, pt_regs_r15(%rax) 36 + pushfq 37 + popq pt_regs_eflags(%rax) 38 38 39 39 /* save the address of restore_registers */ 40 40 movq $restore_registers, %rax ··· 113 113 movq %rcx, %cr3 114 114 movq %rax, %cr4; # turn PGE back on 115 115 116 - movq saved_context_esp(%rip), %rsp 117 - movq saved_context_ebp(%rip), %rbp 118 - /* restore GPRs (we don't restore %rax, it must be 0 anyway) */ 119 - movq saved_context_ebx(%rip), %rbx 120 - movq saved_context_ecx(%rip), %rcx 121 - movq saved_context_edx(%rip), %rdx 122 - movq saved_context_esi(%rip), %rsi 123 - movq saved_context_edi(%rip), %rdi 124 - movq saved_context_r08(%rip), %r8 125 - movq saved_context_r09(%rip), %r9 126 - movq saved_context_r10(%rip), %r10 127 - movq saved_context_r11(%rip), %r11 128 - movq saved_context_r12(%rip), %r12 129 - movq saved_context_r13(%rip), %r13 130 - movq saved_context_r14(%rip), %r14 131 - movq saved_context_r15(%rip), %r15 132 - pushq saved_context_eflags(%rip) ; popfq 116 + /* We don't restore %rax, it must be 0 anyway */ 117 + movq $saved_context, %rax 118 + movq pt_regs_rsp(%rax), %rsp 119 + movq pt_regs_rbp(%rax), %rbp 120 + movq pt_regs_rsi(%rax), %rsi 121 + movq pt_regs_rdi(%rax), %rdi 122 + movq pt_regs_rbx(%rax), %rbx 123 + movq pt_regs_rcx(%rax), %rcx 124 + movq pt_regs_rdx(%rax), %rdx 125 + movq pt_regs_r8(%rax), %r8 126 + movq pt_regs_r9(%rax), %r9 127 + movq pt_regs_r10(%rax), %r10 128 + movq pt_regs_r11(%rax), %r11 129 + movq pt_regs_r12(%rax), %r12 130 + movq pt_regs_r13(%rax), %r13 131 + movq pt_regs_r14(%rax), %r14 132 + movq pt_regs_r15(%rax), %r15 133 + pushq pt_regs_eflags(%rax) 134 + popfq 133 135 134 136 xorq %rax, %rax 135 137
+22 -17
arch/x86/kernel/tsc_32.c
··· 131 131 { 132 132 unsigned long long start, end; 133 133 unsigned long count; 134 - u64 delta64; 134 + u64 delta64 = (u64)ULLONG_MAX; 135 135 int i; 136 136 unsigned long flags; 137 137 138 138 local_irq_save(flags); 139 139 140 - /* run 3 times to ensure the cache is warm */ 140 + /* run 3 times to ensure the cache is warm and to get an accurate reading */ 141 141 for (i = 0; i < 3; i++) { 142 142 mach_prepare_counter(); 143 143 rdtscll(start); 144 144 mach_countup(&count); 145 145 rdtscll(end); 146 + 147 + /* 148 + * Error: ECTCNEVERSET 149 + * The CTC wasn't reliable: we got a hit on the very first read, 150 + * or the CPU was so fast/slow that the quotient wouldn't fit in 151 + * 32 bits.. 152 + */ 153 + if (count <= 1) 154 + continue; 155 + 156 + /* cpu freq too slow: */ 157 + if ((end - start) <= CALIBRATE_TIME_MSEC) 158 + continue; 159 + 160 + /* 161 + * We want the minimum time of all runs in case one of them 162 + * is inaccurate due to SMI or other delay 163 + */ 164 + delta64 = min(delta64, (end - start)); 146 165 } 147 - /* 148 - * Error: ECTCNEVERSET 149 - * The CTC wasn't reliable: we got a hit on the very first read, 150 - * or the CPU was so fast/slow that the quotient wouldn't fit in 151 - * 32 bits.. 152 - */ 153 - if (count <= 1) 154 - goto err; 155 166 156 - delta64 = end - start; 157 - 158 - /* cpu freq too fast: */ 167 + /* cpu freq too fast (or every run was bad): */ 159 168 if (delta64 > (1ULL<<32)) 160 - goto err; 161 - 162 - /* cpu freq too slow: */ 163 - if (delta64 <= CALIBRATE_TIME_MSEC) 164 169 goto err; 165 170 166 171 delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
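To make the calibration arithmetic concrete: delta64 now holds the smallest TSC delta of the three PIT-gated runs, so a single run inflated by an SMI is simply discarded. The CALIBRATE_TIME_MSEC/2 rounding prepares for a do_div by CALIBRATE_TIME_MSEC, giving cycles per millisecond, i.e. the TSC frequency in kHz. As a worked example (cycle count illustrative, and assuming the usual 30 ms gate): 72,000,000 cycles / 30 ms = 2,400,000 kHz, a 2.4 GHz TSC.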
-17
arch/x86/oprofile/Kconfig
··· 1 - config PROFILING 2 - bool "Profiling support (EXPERIMENTAL)" 3 - help 4 - Say Y here to enable the extended profiling support mechanisms used 5 - by profilers such as OProfile. 6 - 7 - 8 - config OPROFILE 9 - tristate "OProfile system profiling (EXPERIMENTAL)" 10 - depends on PROFILING 11 - help 12 - OProfile is a profiling system capable of profiling the 13 - whole system, include the kernel, kernel modules, libraries, 14 - and applications. 15 - 16 - If unsure, say N. 17 -
+2
arch/x86_64/Kconfig
··· 833 833 834 834 source fs/Kconfig 835 835 836 + source "kernel/Kconfig.instrumentation" 837 + 836 838 source "arch/x86_64/Kconfig.debug" 837 839 838 840 source "security/Kconfig"
+6
arch/x86_64/Makefile
··· 24 24 # Fill in SRCARCH 25 25 SRCARCH := x86 26 26 27 + # BITS is used as extension for files which are available in a 32 bit 28 + # and a 64 bit version to simplify shared Makefiles. 29 + # e.g.: obj-y += foo_$(BITS).o 30 + BITS := 64 31 + export BITS 32 + 27 33 LDFLAGS := -m elf_x86_64 28 34 OBJCOPYFLAGS := -O binary -R .note -R .comment -S 29 35 LDFLAGS_vmlinux :=
+1 -26
include/asm-x86/Kbuild
··· 11 11 header-y += ucontext.h 12 12 header-y += vsyscall32.h 13 13 14 - unifdef-y += a.out_32.h 15 - unifdef-y += a.out_64.h 16 - unifdef-y += byteorder_32.h 17 - unifdef-y += byteorder_64.h 18 14 unifdef-y += e820.h 19 - unifdef-y += elf_32.h 20 - unifdef-y += elf_64.h 21 15 unifdef-y += ist.h 22 16 unifdef-y += mce.h 23 - unifdef-y += msgbuf_32.h 24 - unifdef-y += msgbuf_64.h 25 - unifdef-y += msr_32.h 26 - unifdef-y += msr_64.h 27 17 unifdef-y += msr.h 28 - unifdef-y += mtrr_32.h 29 - unifdef-y += mtrr_64.h 30 18 unifdef-y += mtrr.h 31 19 unifdef-y += page_32.h 32 20 unifdef-y += page_64.h 33 21 unifdef-y += posix_types_32.h 34 22 unifdef-y += posix_types_64.h 35 - unifdef-y += ptrace_32.h 36 - unifdef-y += ptrace_64.h 37 - unifdef-y += setup_32.h 38 - unifdef-y += setup_64.h 39 - unifdef-y += shmbuf_32.h 40 - unifdef-y += shmbuf_64.h 41 - unifdef-y += sigcontext_32.h 42 - unifdef-y += sigcontext_64.h 43 - unifdef-y += signal_32.h 44 - unifdef-y += signal_64.h 45 - unifdef-y += stat_32.h 46 - unifdef-y += stat_64.h 47 - unifdef-y += statfs_32.h 48 - unifdef-y += statfs_64.h 23 + unifdef-y += ptrace.h 49 24 unifdef-y += unistd_32.h 50 25 unifdef-y += unistd_64.h 51 26 unifdef-y += user_32.h
+25 -8
include/asm-x86/a.out.h
··· 1 + #ifndef _ASM_X86_A_OUT_H 2 + #define _ASM_X86_A_OUT_H 3 + 4 + struct exec 5 + { 6 + unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 7 + unsigned a_text; /* length of text, in bytes */ 8 + unsigned a_data; /* length of data, in bytes */ 9 + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ 10 + unsigned a_syms; /* length of symbol table data in file, in bytes */ 11 + unsigned a_entry; /* start address */ 12 + unsigned a_trsize; /* length of relocation info for text, in bytes */ 13 + unsigned a_drsize; /* length of relocation info for data, in bytes */ 14 + }; 15 + 16 + #define N_TRSIZE(a) ((a).a_trsize) 17 + #define N_DRSIZE(a) ((a).a_drsize) 18 + #define N_SYMSIZE(a) ((a).a_syms) 19 + 1 20 #ifdef __KERNEL__ 21 + # include <linux/thread_info.h> 22 + # define STACK_TOP TASK_SIZE 2 23 # ifdef CONFIG_X86_32 3 - # include "a.out_32.h" 24 + # define STACK_TOP_MAX STACK_TOP 4 25 # else 5 - # include "a.out_64.h" 6 - # endif 7 - #else 8 - # ifdef __i386__ 9 - # include "a.out_32.h" 10 - # else 11 - # include "a.out_64.h" 26 + # define STACK_TOP_MAX TASK_SIZE64 12 27 # endif 13 28 #endif 29 + 30 + #endif /* _ASM_X86_A_OUT_H */
-27
include/asm-x86/a.out_32.h
··· 1 - #ifndef __I386_A_OUT_H__ 2 - #define __I386_A_OUT_H__ 3 - 4 - struct exec 5 - { 6 - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ 7 - unsigned a_text; /* length of text, in bytes */ 8 - unsigned a_data; /* length of data, in bytes */ 9 - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ 10 - unsigned a_syms; /* length of symbol table data in file, in bytes */ 11 - unsigned a_entry; /* start address */ 12 - unsigned a_trsize; /* length of relocation info for text, in bytes */ 13 - unsigned a_drsize; /* length of relocation info for data, in bytes */ 14 - }; 15 - 16 - #define N_TRSIZE(a) ((a).a_trsize) 17 - #define N_DRSIZE(a) ((a).a_drsize) 18 - #define N_SYMSIZE(a) ((a).a_syms) 19 - 20 - #ifdef __KERNEL__ 21 - 22 - #define STACK_TOP TASK_SIZE 23 - #define STACK_TOP_MAX STACK_TOP 24 - 25 - #endif 26 - 27 - #endif /* __A_OUT_GNU_H__ */
-28
include/asm-x86/a.out_64.h
··· 1 - #ifndef __X8664_A_OUT_H__ 2 - #define __X8664_A_OUT_H__ 3 - 4 - /* 32bit a.out */ 5 - 6 - struct exec 7 - { 8 - unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 9 - unsigned a_text; /* length of text, in bytes */ 10 - unsigned a_data; /* length of data, in bytes */ 11 - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ 12 - unsigned a_syms; /* length of symbol table data in file, in bytes */ 13 - unsigned a_entry; /* start address */ 14 - unsigned a_trsize; /* length of relocation info for text, in bytes */ 15 - unsigned a_drsize; /* length of relocation info for data, in bytes */ 16 - }; 17 - 18 - #define N_TRSIZE(a) ((a).a_trsize) 19 - #define N_DRSIZE(a) ((a).a_drsize) 20 - #define N_SYMSIZE(a) ((a).a_syms) 21 - 22 - #ifdef __KERNEL__ 23 - #include <linux/thread_info.h> 24 - #define STACK_TOP TASK_SIZE 25 - #define STACK_TOP_MAX TASK_SIZE64 26 - #endif 27 - 28 - #endif /* __A_OUT_GNU_H__ */
+1
include/asm-x86/apic_64.h
··· 69 69 extern void connect_bsp_APIC (void); 70 70 extern void disconnect_bsp_APIC (int virt_wire_setup); 71 71 extern void disable_local_APIC (void); 72 + extern void lapic_shutdown (void); 72 73 extern int verify_local_APIC (void); 73 74 extern void cache_APIC_registers (void); 74 75 extern void sync_Arb_IDs (void);
+42 -1
include/asm-x86/bitops_32.h
··· 80 80 :"Ir" (nr)); 81 81 } 82 82 83 + /* 84 + * clear_bit_unlock - Clears a bit in memory 85 + * @nr: Bit to clear 86 + * @addr: Address to start counting from 87 + * 88 + * clear_bit() is atomic and implies release semantics before the memory 89 + * operation. It can be used for an unlock. 90 + */ 91 + static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) 92 + { 93 + barrier(); 94 + clear_bit(nr, addr); 95 + } 96 + 83 97 static inline void __clear_bit(int nr, volatile unsigned long * addr) 84 98 { 85 99 __asm__ __volatile__( ··· 101 87 :"+m" (ADDR) 102 88 :"Ir" (nr)); 103 89 } 90 + 91 + /* 92 + * __clear_bit_unlock - Clears a bit in memory 93 + * @nr: Bit to clear 94 + * @addr: Address to start counting from 95 + * 96 + * __clear_bit() is non-atomic and implies release semantics before the memory 97 + * operation. It can be used for an unlock if no other CPUs can concurrently 98 + * modify other bits in the word. 99 + * 100 + * No memory barrier is required here, because x86 cannot reorder stores past 101 + * older loads. Same principle as spin_unlock. 102 + */ 103 + static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) 104 + { 105 + barrier(); 106 + __clear_bit(nr, addr); 107 + } 108 + 104 109 #define smp_mb__before_clear_bit() barrier() 105 110 #define smp_mb__after_clear_bit() barrier() 106 111 ··· 177 144 :"Ir" (nr) : "memory"); 178 145 return oldbit; 179 146 } 147 + 148 + /** 149 + * test_and_set_bit_lock - Set a bit and return its old value for lock 150 + * @nr: Bit to set 151 + * @addr: Address to count from 152 + * 153 + * This is the same as test_and_set_bit on x86 154 + */ 155 + #define test_and_set_bit_lock test_and_set_bit 180 156 181 157 /** 182 158 * __test_and_set_bit - Set a bit and return its old value ··· 448 406 } 449 407 450 408 #include <asm-generic/bitops/hweight.h> 451 - #include <asm-generic/bitops/lock.h> 452 409 453 410 #endif /* __KERNEL__ */ 454 411
+41 -1
include/asm-x86/bitops_64.h
··· 72 72 :"dIr" (nr)); 73 73 } 74 74 75 + /* 76 + * clear_bit_unlock - Clears a bit in memory 77 + * @nr: Bit to clear 78 + * @addr: Address to start counting from 79 + * 80 + * clear_bit() is atomic and implies release semantics before the memory 81 + * operation. It can be used for an unlock. 82 + */ 83 + static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) 84 + { 85 + barrier(); 86 + clear_bit(nr, addr); 87 + } 88 + 75 89 static __inline__ void __clear_bit(int nr, volatile void * addr) 76 90 { 77 91 __asm__ __volatile__( 78 92 "btrl %1,%0" 79 93 :ADDR 80 94 :"dIr" (nr)); 95 + } 96 + 97 + /* 98 + * __clear_bit_unlock - Clears a bit in memory 99 + * @nr: Bit to clear 100 + * @addr: Address to start counting from 101 + * 102 + * __clear_bit() is non-atomic and implies release semantics before the memory 103 + * operation. It can be used for an unlock if no other CPUs can concurrently 104 + * modify other bits in the word. 105 + * 106 + * No memory barrier is required here, because x86 cannot reorder stores past 107 + * older loads. Same principle as spin_unlock. 108 + */ 109 + static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) 110 + { 111 + barrier(); 112 + __clear_bit(nr, addr); 81 113 } 82 114 83 115 #define smp_mb__before_clear_bit() barrier() ··· 167 135 :"dIr" (nr) : "memory"); 168 136 return oldbit; 169 137 } 138 + 139 + /** 140 + * test_and_set_bit_lock - Set a bit and return its old value for lock 141 + * @nr: Bit to set 142 + * @addr: Address to count from 143 + * 144 + * This is the same as test_and_set_bit on x86 145 + */ 146 + #define test_and_set_bit_lock test_and_set_bit 170 147 171 148 /** 172 149 * __test_and_set_bit - Set a bit and return its old value ··· 453 412 #define ARCH_HAS_FAST_MULTIPLIER 1 454 413 455 414 #include <asm-generic/bitops/hweight.h> 456 - #include <asm-generic/bitops/lock.h> 457 415 458 416 #endif /* __KERNEL__ */ 459 417
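Since both bitops hunks add the same lock-flavored primitives, a minimal C usage sketch may help; the flag word and function here are hypothetical, kernel context assumed:

    #include <linux/bitops.h>      /* the primitives added above */
    #include <asm/processor.h>     /* cpu_relax() */

    static unsigned long my_flags; /* hypothetical lock word; bit 0 = busy */

    static void my_locked_work(void)
    {
            while (test_and_set_bit_lock(0, &my_flags))
                    cpu_relax();   /* bit already held: spin */
            /* ... critical section, owned via bit 0 (acquire semantics) ... */
            clear_bit_unlock(0, &my_flags); /* release */
    }

Per the comments above, __clear_bit_unlock is the cheaper release when no other CPU can concurrently modify other bits in the same word.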
+70 -11
include/asm-x86/byteorder.h
··· 1 - #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "byteorder_32.h" 4 - # else 5 - # include "byteorder_64.h" 6 - # endif 1 + #ifndef _ASM_X86_BYTEORDER_H 2 + #define _ASM_X86_BYTEORDER_H 3 + 4 + #include <asm/types.h> 5 + #include <linux/compiler.h> 6 + 7 + #ifdef __GNUC__ 8 + 9 + #ifdef __i386__ 10 + 11 + static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) 12 + { 13 + #ifdef CONFIG_X86_BSWAP 14 + __asm__("bswap %0" : "=r" (x) : "0" (x)); 7 15 #else 8 - # ifdef __i386__ 9 - # include "byteorder_32.h" 10 - # else 11 - # include "byteorder_64.h" 12 - # endif 16 + __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ 17 + "rorl $16,%0\n\t" /* swap words */ 18 + "xchgb %b0,%h0" /* swap higher bytes */ 19 + :"=q" (x) 20 + : "0" (x)); 13 21 #endif 22 + return x; 23 + } 24 + 25 + static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) 26 + { 27 + union { 28 + struct { __u32 a,b; } s; 29 + __u64 u; 30 + } v; 31 + v.u = val; 32 + #ifdef CONFIG_X86_BSWAP 33 + asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 34 + : "=r" (v.s.a), "=r" (v.s.b) 35 + : "0" (v.s.a), "1" (v.s.b)); 36 + #else 37 + v.s.a = ___arch__swab32(v.s.a); 38 + v.s.b = ___arch__swab32(v.s.b); 39 + asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); 40 + #endif 41 + return v.u; 42 + } 43 + 44 + #else /* __i386__ */ 45 + 46 + static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) 47 + { 48 + __asm__("bswapq %0" : "=r" (x) : "0" (x)); 49 + return x; 50 + } 51 + 52 + static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) 53 + { 54 + __asm__("bswapl %0" : "=r" (x) : "0" (x)); 55 + return x; 56 + } 57 + 58 + #endif 59 + 60 + /* Do not define swab16. Gcc is smart enough to recognize "C" version and 61 + convert it into rotation or exchange. */ 62 + 63 + #define __arch__swab64(x) ___arch__swab64(x) 64 + #define __arch__swab32(x) ___arch__swab32(x) 65 + 66 + #define __BYTEORDER_HAS_U64__ 67 + 68 + #endif /* __GNUC__ */ 69 + 70 + #include <linux/byteorder/little_endian.h> 71 + 72 + #endif /* _ASM_X86_BYTEORDER_H */
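As a quick sanity check on the merged helpers: ___arch__swab32(0x12345678) yields 0x78563412, and ___arch__swab64 reverses all eight bytes the same way, via two bswap/xchgl steps on i386 but a single bswapq on x86-64.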
-58
include/asm-x86/byteorder_32.h
··· 1 - #ifndef _I386_BYTEORDER_H 2 - #define _I386_BYTEORDER_H 3 - 4 - #include <asm/types.h> 5 - #include <linux/compiler.h> 6 - 7 - #ifdef __GNUC__ 8 - 9 - /* For avoiding bswap on i386 */ 10 - #ifdef __KERNEL__ 11 - #endif 12 - 13 - static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) 14 - { 15 - #ifdef CONFIG_X86_BSWAP 16 - __asm__("bswap %0" : "=r" (x) : "0" (x)); 17 - #else 18 - __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ 19 - "rorl $16,%0\n\t" /* swap words */ 20 - "xchgb %b0,%h0" /* swap higher bytes */ 21 - :"=q" (x) 22 - : "0" (x)); 23 - #endif 24 - return x; 25 - } 26 - 27 - static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) 28 - { 29 - union { 30 - struct { __u32 a,b; } s; 31 - __u64 u; 32 - } v; 33 - v.u = val; 34 - #ifdef CONFIG_X86_BSWAP 35 - asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 36 - : "=r" (v.s.a), "=r" (v.s.b) 37 - : "0" (v.s.a), "1" (v.s.b)); 38 - #else 39 - v.s.a = ___arch__swab32(v.s.a); 40 - v.s.b = ___arch__swab32(v.s.b); 41 - asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); 42 - #endif 43 - return v.u; 44 - } 45 - 46 - /* Do not define swab16. Gcc is smart enough to recognize "C" version and 47 - convert it into rotation or exhange. */ 48 - 49 - #define __arch__swab64(x) ___arch__swab64(x) 50 - #define __arch__swab32(x) ___arch__swab32(x) 51 - 52 - #define __BYTEORDER_HAS_U64__ 53 - 54 - #endif /* __GNUC__ */ 55 - 56 - #include <linux/byteorder/little_endian.h> 57 - 58 - #endif /* _I386_BYTEORDER_H */
-33
include/asm-x86/byteorder_64.h
··· 1 - #ifndef _X86_64_BYTEORDER_H 2 - #define _X86_64_BYTEORDER_H 3 - 4 - #include <asm/types.h> 5 - #include <linux/compiler.h> 6 - 7 - #ifdef __GNUC__ 8 - 9 - static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) 10 - { 11 - __asm__("bswapq %0" : "=r" (x) : "0" (x)); 12 - return x; 13 - } 14 - 15 - static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) 16 - { 17 - __asm__("bswapl %0" : "=r" (x) : "0" (x)); 18 - return x; 19 - } 20 - 21 - /* Do not define swab16. Gcc is smart enough to recognize "C" version and 22 - convert it into rotation or exhange. */ 23 - 24 - #define __arch__swab32(x) ___arch__swab32(x) 25 - #define __arch__swab64(x) ___arch__swab64(x) 26 - 27 - #endif /* __GNUC__ */ 28 - 29 - #define __BYTEORDER_HAS_U64__ 30 - 31 - #include <linux/byteorder/little_endian.h> 32 - 33 - #endif /* _X86_64_BYTEORDER_H */
+57 -3
include/asm-x86/div64.h
··· 1 + #ifndef _ASM_X86_DIV64_H 2 + #define _ASM_X86_DIV64_H 3 + 1 4 #ifdef CONFIG_X86_32 2 - # include "div64_32.h" 5 + 6 + #include <linux/types.h> 7 + 8 + /* 9 + * do_div() is NOT a C function. It wants to return 10 + * two values (the quotient and the remainder), but 11 + * since that doesn't work very well in C, what it 12 + * does is: 13 + * 14 + * - modifies the 64-bit dividend _in_place_ 15 + * - returns the 32-bit remainder 16 + * 17 + * This ends up being the most efficient "calling 18 + * convention" on x86. 19 + */ 20 + #define do_div(n,base) ({ \ 21 + unsigned long __upper, __low, __high, __mod, __base; \ 22 + __base = (base); \ 23 + asm("":"=a" (__low), "=d" (__high):"A" (n)); \ 24 + __upper = __high; \ 25 + if (__high) { \ 26 + __upper = __high % (__base); \ 27 + __high = __high / (__base); \ 28 + } \ 29 + asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ 30 + asm("":"=A" (n):"a" (__low),"d" (__high)); \ 31 + __mod; \ 32 + }) 33 + 34 + /* 35 + * (long)X = ((long long)divs) / (long)div 36 + * (long)rem = ((long long)divs) % (long)div 37 + * 38 + * Warning: this will raise a divide exception if X overflows. 39 + */ 40 + #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) 41 + 42 + static inline long 43 + div_ll_X_l_rem(long long divs, long div, long *rem) 44 + { 45 + long dum2; 46 + __asm__("divl %2":"=a"(dum2), "=d"(*rem) 47 + : "rm"(div), "A"(divs)); 48 + 49 + return dum2; 50 + 51 + } 52 + 53 + extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); 54 + 3 55 #else 4 - # include "div64_64.h" 5 - #endif 56 + # include <asm-generic/div64.h> 57 + #endif /* CONFIG_X86_32 */ 58 + 59 + #endif /* _ASM_X86_DIV64_H */
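The do_div() comment above is worth restating with an example, since the macro is routinely misused: it divides its 64-bit lvalue in place and evaluates to the 32-bit remainder, and base must fit in 32 bits. A plain-C user-space sketch of that contract (do_div_sketch() is a hypothetical stand-in for the asm macro, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* do_div()-style contract: divide *n in place, return the remainder. */
    static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
    {
            uint32_t rem = (uint32_t)(*n % base);

            *n /= base;
            return rem;
    }

    int main(void)
    {
            uint64_t ns = 1234567890123ULL;         /* nanoseconds */
            uint32_t rem = do_div_sketch(&ns, 1000000000u);

            /* prints: 1234 s, remainder 567890123 ns */
            printf("%llu s, remainder %u ns\n", (unsigned long long)ns, rem);
            return 0;
    }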
-52
include/asm-x86/div64_32.h
··· 1 - #ifndef __I386_DIV64 2 - #define __I386_DIV64 3 - 4 - #include <linux/types.h> 5 - 6 - /* 7 - * do_div() is NOT a C function. It wants to return 8 - * two values (the quotient and the remainder), but 9 - * since that doesn't work very well in C, what it 10 - * does is: 11 - * 12 - * - modifies the 64-bit dividend _in_place_ 13 - * - returns the 32-bit remainder 14 - * 15 - * This ends up being the most efficient "calling 16 - * convention" on x86. 17 - */ 18 - #define do_div(n,base) ({ \ 19 - unsigned long __upper, __low, __high, __mod, __base; \ 20 - __base = (base); \ 21 - asm("":"=a" (__low), "=d" (__high):"A" (n)); \ 22 - __upper = __high; \ 23 - if (__high) { \ 24 - __upper = __high % (__base); \ 25 - __high = __high / (__base); \ 26 - } \ 27 - asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ 28 - asm("":"=A" (n):"a" (__low),"d" (__high)); \ 29 - __mod; \ 30 - }) 31 - 32 - /* 33 - * (long)X = ((long long)divs) / (long)div 34 - * (long)rem = ((long long)divs) % (long)div 35 - * 36 - * Warning, this will do an exception if X overflows. 37 - */ 38 - #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) 39 - 40 - static inline long 41 - div_ll_X_l_rem(long long divs, long div, long *rem) 42 - { 43 - long dum2; 44 - __asm__("divl %2":"=a"(dum2), "=d"(*rem) 45 - : "rm"(div), "A"(divs)); 46 - 47 - return dum2; 48 - 49 - } 50 - 51 - extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); 52 - #endif
-1
include/asm-x86/div64_64.h
··· 1 - #include <asm-generic/div64.h>
+288 -11
include/asm-x86/elf.h
··· 1 - #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "elf_32.h" 4 - # else 5 - # include "elf_64.h" 6 - # endif 1 + #ifndef _ASM_X86_ELF_H 2 + #define _ASM_X86_ELF_H 3 + 4 + /* 5 + * ELF register definitions.. 6 + */ 7 + 8 + #include <asm/ptrace.h> 9 + #include <asm/user.h> 10 + #include <asm/auxvec.h> 11 + 12 + typedef unsigned long elf_greg_t; 13 + 14 + #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) 15 + typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 16 + 17 + typedef struct user_i387_struct elf_fpregset_t; 18 + 19 + #ifdef __i386__ 20 + 21 + typedef struct user_fxsr_struct elf_fpxregset_t; 22 + 23 + #define R_386_NONE 0 24 + #define R_386_32 1 25 + #define R_386_PC32 2 26 + #define R_386_GOT32 3 27 + #define R_386_PLT32 4 28 + #define R_386_COPY 5 29 + #define R_386_GLOB_DAT 6 30 + #define R_386_JMP_SLOT 7 31 + #define R_386_RELATIVE 8 32 + #define R_386_GOTOFF 9 33 + #define R_386_GOTPC 10 34 + #define R_386_NUM 11 35 + 36 + /* 37 + * These are used to set parameters in the core dumps. 38 + */ 39 + #define ELF_CLASS ELFCLASS32 40 + #define ELF_DATA ELFDATA2LSB 41 + #define ELF_ARCH EM_386 42 + 7 43 #else 8 - # ifdef __i386__ 9 - # include "elf_32.h" 10 - # else 11 - # include "elf_64.h" 12 - # endif 44 + 45 + /* x86-64 relocation types */ 46 + #define R_X86_64_NONE 0 /* No reloc */ 47 + #define R_X86_64_64 1 /* Direct 64 bit */ 48 + #define R_X86_64_PC32 2 /* PC relative 32 bit signed */ 49 + #define R_X86_64_GOT32 3 /* 32 bit GOT entry */ 50 + #define R_X86_64_PLT32 4 /* 32 bit PLT address */ 51 + #define R_X86_64_COPY 5 /* Copy symbol at runtime */ 52 + #define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ 53 + #define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ 54 + #define R_X86_64_RELATIVE 8 /* Adjust by program base */ 55 + #define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative 56 + offset to GOT */ 57 + #define R_X86_64_32 10 /* Direct 32 bit zero extended */ 58 + #define R_X86_64_32S 11 /* Direct 32 bit sign extended */ 59 + #define R_X86_64_16 12 /* Direct 16 bit zero extended */ 60 + #define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ 61 + #define R_X86_64_8 14 /* Direct 8 bit sign extended */ 62 + #define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ 63 + 64 + #define R_X86_64_NUM 16 65 + 66 + /* 67 + * These are used to set parameters in the core dumps. 68 + */ 69 + #define ELF_CLASS ELFCLASS64 70 + #define ELF_DATA ELFDATA2LSB 71 + #define ELF_ARCH EM_X86_64 72 + 73 + #endif 74 + 75 + #ifdef __KERNEL__ 76 + 77 + #ifdef CONFIG_X86_32 78 + #include <asm/processor.h> 79 + #include <asm/system.h> /* for savesegment */ 80 + #include <asm/desc.h> 81 + 82 + /* 83 + * This is used to ensure we don't load something for the wrong architecture. 84 + */ 85 + #define elf_check_arch(x) \ 86 + (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) 87 + 88 + /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx 89 + contains a pointer to a function which might be registered using `atexit'. 90 + This provides a means for the dynamic linker to call DT_FINI functions for 91 + shared libraries that have been loaded before the code runs. 92 + 93 + A value of 0 tells us we have no such handler. 94 + 95 + We might as well make sure everything else is cleared too (except for %esp), 96 + just to make things more deterministic. 97 + */ 98 + #define ELF_PLAT_INIT(_r, load_addr) do { \ 99 + _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ 100 + _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ 101 + _r->eax = 0; \ 102 + } while (0) 103 + 104 + /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is 105 + now struct_user_regs, they are different) */ 106 + 107 + #define ELF_CORE_COPY_REGS(pr_reg, regs) \ 108 + pr_reg[0] = regs->ebx; \ 109 + pr_reg[1] = regs->ecx; \ 110 + pr_reg[2] = regs->edx; \ 111 + pr_reg[3] = regs->esi; \ 112 + pr_reg[4] = regs->edi; \ 113 + pr_reg[5] = regs->ebp; \ 114 + pr_reg[6] = regs->eax; \ 115 + pr_reg[7] = regs->xds & 0xffff; \ 116 + pr_reg[8] = regs->xes & 0xffff; \ 117 + pr_reg[9] = regs->xfs & 0xffff; \ 118 + savesegment(gs,pr_reg[10]); \ 119 + pr_reg[11] = regs->orig_eax; \ 120 + pr_reg[12] = regs->eip; \ 121 + pr_reg[13] = regs->xcs & 0xffff; \ 122 + pr_reg[14] = regs->eflags; \ 123 + pr_reg[15] = regs->esp; \ 124 + pr_reg[16] = regs->xss & 0xffff; 125 + 126 + #define ELF_PLATFORM (utsname()->machine) 127 + #define set_personality_64bit() do { } while (0) 128 + extern unsigned int vdso_enabled; 129 + 130 + #else /* CONFIG_X86_32 */ 131 + 132 + #include <asm/processor.h> 133 + 134 + /* 135 + * This is used to ensure we don't load something for the wrong architecture. 136 + */ 137 + #define elf_check_arch(x) \ 138 + ((x)->e_machine == EM_X86_64) 139 + 140 + #define ELF_PLAT_INIT(_r, load_addr) do { \ 141 + struct task_struct *cur = current; \ 142 + (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \ 143 + (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \ 144 + (_r)->rax = 0; \ 145 + (_r)->r8 = 0; \ 146 + (_r)->r9 = 0; \ 147 + (_r)->r10 = 0; \ 148 + (_r)->r11 = 0; \ 149 + (_r)->r12 = 0; \ 150 + (_r)->r13 = 0; \ 151 + (_r)->r14 = 0; \ 152 + (_r)->r15 = 0; \ 153 + cur->thread.fs = 0; cur->thread.gs = 0; \ 154 + cur->thread.fsindex = 0; cur->thread.gsindex = 0; \ 155 + cur->thread.ds = 0; cur->thread.es = 0; \ 156 + clear_thread_flag(TIF_IA32); \ 157 + } while (0) 158 + 159 + /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is 160 + now struct_user_regs, they are different). Assumes current is the process 161 + getting dumped. */ 162 + 163 + #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ 164 + unsigned v; \ 165 + (pr_reg)[0] = (regs)->r15; \ 166 + (pr_reg)[1] = (regs)->r14; \ 167 + (pr_reg)[2] = (regs)->r13; \ 168 + (pr_reg)[3] = (regs)->r12; \ 169 + (pr_reg)[4] = (regs)->rbp; \ 170 + (pr_reg)[5] = (regs)->rbx; \ 171 + (pr_reg)[6] = (regs)->r11; \ 172 + (pr_reg)[7] = (regs)->r10; \ 173 + (pr_reg)[8] = (regs)->r9; \ 174 + (pr_reg)[9] = (regs)->r8; \ 175 + (pr_reg)[10] = (regs)->rax; \ 176 + (pr_reg)[11] = (regs)->rcx; \ 177 + (pr_reg)[12] = (regs)->rdx; \ 178 + (pr_reg)[13] = (regs)->rsi; \ 179 + (pr_reg)[14] = (regs)->rdi; \ 180 + (pr_reg)[15] = (regs)->orig_rax; \ 181 + (pr_reg)[16] = (regs)->rip; \ 182 + (pr_reg)[17] = (regs)->cs; \ 183 + (pr_reg)[18] = (regs)->eflags; \ 184 + (pr_reg)[19] = (regs)->rsp; \ 185 + (pr_reg)[20] = (regs)->ss; \ 186 + (pr_reg)[21] = current->thread.fs; \ 187 + (pr_reg)[22] = current->thread.gs; \ 188 + asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \ 189 + asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \ 190 + asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \ 191 + asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \ 192 + } while(0); 193 + 194 + /* I'm not sure if we can use '-' here */ 195 + #define ELF_PLATFORM ("x86_64") 196 + extern void set_personality_64bit(void); 197 + extern int vdso_enabled; 198 + 199 + #endif /* !CONFIG_X86_32 */ 200 + 201 + #define USE_ELF_CORE_DUMP 202 + #define ELF_EXEC_PAGESIZE 4096 203 + 204 + /* This is the location that an ET_DYN program is loaded if exec'ed. Typical 205 + use of this is to invoke "./ld.so someprog" to test out a new version of 206 + the loader. We need to make sure that it is out of the way of the program 207 + that it will "exec", and that there is sufficient room for the brk. */ 208 + 209 + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) 210 + 211 + /* This yields a mask that user programs can use to figure out what 212 + instruction set this CPU supports. This could be done in user space, 213 + but it's not easy, and we've already done it here. */ 214 + 215 + #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) 216 + 217 + /* This yields a string that ld.so will use to load implementation 218 + specific libraries for optimization. This is more specific in 219 + intent than poking at uname or /proc/cpuinfo. 220 + 221 + For the moment, we have only optimizations for the Intel generations, 222 + but that could change... */ 223 + 224 + #define SET_PERSONALITY(ex, ibcs2) set_personality_64bit() 225 + 226 + /* 227 + * An executable for which elf_read_implies_exec() returns TRUE will 228 + * have the READ_IMPLIES_EXEC personality flag set automatically. 229 + */ 230 + #define elf_read_implies_exec(ex, executable_stack) \ 231 + (executable_stack != EXSTACK_DISABLE_X) 232 + 233 + struct task_struct; 234 + 235 + extern int dump_task_regs (struct task_struct *, elf_gregset_t *); 236 + extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); 237 + 238 + #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) 239 + #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 240 + 241 + #ifdef CONFIG_X86_32 242 + extern int dump_task_extended_fpu (struct task_struct *, 243 + struct user_fxsr_struct *); 244 + #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) \ 245 + dump_task_extended_fpu(tsk, elf_xfpregs) 246 + #define ELF_CORE_XFPREG_TYPE NT_PRXFPREG 247 + 248 + #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) 249 + #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) 250 + #define VDSO_PRELINK 0 251 + 252 + #define VDSO_SYM(x) \ 253 + (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK) 254 + 255 + #define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) 256 + #define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE) 257 + 258 + extern void __kernel_vsyscall; 259 + 260 + #define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) 261 + 262 + /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ 263 + 264 + #define ARCH_DLINFO \ 265 + do if (vdso_enabled) { \ 266 + NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ 267 + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ 268 + } while (0) 269 + 270 + #else /* CONFIG_X86_32 */ 271 + 272 + /* 1GB for 64bit, 8MB for 32bit */ 273 + #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) 274 + 275 + #define ARCH_DLINFO \ 276 + do if (vdso_enabled) { \ 277 + NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ 278 + } while (0) 279 + 280 + #endif /* !CONFIG_X86_32 */ 281 + 282 + struct linux_binprm; 283 + 284 + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 285 + extern int arch_setup_additional_pages(struct linux_binprm *bprm, 286 + int executable_stack); 287 + 288 + #endif /* __KERNEL__ */ 289 + 13 290 #endif
-165
include/asm-x86/elf_32.h
··· 1 - #ifndef __ASMi386_ELF_H 2 - #define __ASMi386_ELF_H 3 - 4 - /* 5 - * ELF register definitions.. 6 - */ 7 - 8 - #include <asm/ptrace.h> 9 - #include <asm/user.h> 10 - #include <asm/auxvec.h> 11 - 12 - #define R_386_NONE 0 13 - #define R_386_32 1 14 - #define R_386_PC32 2 15 - #define R_386_GOT32 3 16 - #define R_386_PLT32 4 17 - #define R_386_COPY 5 18 - #define R_386_GLOB_DAT 6 19 - #define R_386_JMP_SLOT 7 20 - #define R_386_RELATIVE 8 21 - #define R_386_GOTOFF 9 22 - #define R_386_GOTPC 10 23 - #define R_386_NUM 11 24 - 25 - typedef unsigned long elf_greg_t; 26 - 27 - #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) 28 - typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 29 - 30 - typedef struct user_i387_struct elf_fpregset_t; 31 - typedef struct user_fxsr_struct elf_fpxregset_t; 32 - 33 - /* 34 - * This is used to ensure we don't load something for the wrong architecture. 35 - */ 36 - #define elf_check_arch(x) \ 37 - (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) 38 - 39 - /* 40 - * These are used to set parameters in the core dumps. 41 - */ 42 - #define ELF_CLASS ELFCLASS32 43 - #define ELF_DATA ELFDATA2LSB 44 - #define ELF_ARCH EM_386 45 - 46 - #ifdef __KERNEL__ 47 - 48 - #include <asm/processor.h> 49 - #include <asm/system.h> /* for savesegment */ 50 - #include <asm/desc.h> 51 - 52 - /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx 53 - contains a pointer to a function which might be registered using `atexit'. 54 - This provides a mean for the dynamic linker to call DT_FINI functions for 55 - shared libraries that have been loaded before the code runs. 56 - 57 - A value of 0 tells we have no such handler. 58 - 59 - We might as well make sure everything else is cleared too (except for %esp), 60 - just to make things more deterministic. 61 - */ 62 - #define ELF_PLAT_INIT(_r, load_addr) do { \ 63 - _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ 64 - _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ 65 - _r->eax = 0; \ 66 - } while (0) 67 - 68 - #define USE_ELF_CORE_DUMP 69 - #define ELF_EXEC_PAGESIZE 4096 70 - 71 - /* This is the location that an ET_DYN program is loaded if exec'ed. Typical 72 - use of this is to invoke "./ld.so someprog" to test out a new version of 73 - the loader. We need to make sure that it is out of the way of the program 74 - that it will "exec", and that there is sufficient room for the brk. */ 75 - 76 - #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) 77 - 78 - /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is 79 - now struct_user_regs, they are different) */ 80 - 81 - #define ELF_CORE_COPY_REGS(pr_reg, regs) \ 82 - pr_reg[0] = regs->ebx; \ 83 - pr_reg[1] = regs->ecx; \ 84 - pr_reg[2] = regs->edx; \ 85 - pr_reg[3] = regs->esi; \ 86 - pr_reg[4] = regs->edi; \ 87 - pr_reg[5] = regs->ebp; \ 88 - pr_reg[6] = regs->eax; \ 89 - pr_reg[7] = regs->xds & 0xffff; \ 90 - pr_reg[8] = regs->xes & 0xffff; \ 91 - pr_reg[9] = regs->xfs & 0xffff; \ 92 - savesegment(gs,pr_reg[10]); \ 93 - pr_reg[11] = regs->orig_eax; \ 94 - pr_reg[12] = regs->eip; \ 95 - pr_reg[13] = regs->xcs & 0xffff; \ 96 - pr_reg[14] = regs->eflags; \ 97 - pr_reg[15] = regs->esp; \ 98 - pr_reg[16] = regs->xss & 0xffff; 99 - 100 - /* This yields a mask that user programs can use to figure out what 101 - instruction set this CPU supports. This could be done in user space, 102 - but it's not easy, and we've already done it here. 
*/ 103 - 104 - #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) 105 - 106 - /* This yields a string that ld.so will use to load implementation 107 - specific libraries for optimization. This is more specific in 108 - intent than poking at uname or /proc/cpuinfo. 109 - 110 - For the moment, we have only optimizations for the Intel generations, 111 - but that could change... */ 112 - 113 - #define ELF_PLATFORM (utsname()->machine) 114 - 115 - #define SET_PERSONALITY(ex, ibcs2) do { } while (0) 116 - 117 - /* 118 - * An executable for which elf_read_implies_exec() returns TRUE will 119 - * have the READ_IMPLIES_EXEC personality flag set automatically. 120 - */ 121 - #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) 122 - 123 - struct task_struct; 124 - 125 - extern int dump_task_regs (struct task_struct *, elf_gregset_t *); 126 - extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); 127 - extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *); 128 - 129 - #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) 130 - #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 131 - #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) 132 - #define ELF_CORE_XFPREG_TYPE NT_PRXFPREG 133 - 134 - #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) 135 - #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) 136 - #define VDSO_PRELINK 0 137 - 138 - #define VDSO_SYM(x) \ 139 - (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK) 140 - 141 - #define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) 142 - #define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE) 143 - 144 - extern void __kernel_vsyscall; 145 - 146 - #define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) 147 - 148 - struct linux_binprm; 149 - 150 - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 151 - extern int arch_setup_additional_pages(struct linux_binprm *bprm, 152 - int executable_stack); 153 - 154 - extern unsigned int vdso_enabled; 155 - 156 - /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ 157 - #define ARCH_DLINFO \ 158 - do if (vdso_enabled) { \ 159 - NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ 160 - NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ 161 - } while (0) 162 - 163 - #endif 164 - 165 - #endif
-180
include/asm-x86/elf_64.h
··· 1 - #ifndef __ASM_X86_64_ELF_H 2 - #define __ASM_X86_64_ELF_H 3 - 4 - /* 5 - * ELF register definitions.. 6 - */ 7 - 8 - #include <asm/ptrace.h> 9 - #include <asm/user.h> 10 - 11 - /* x86-64 relocation types */ 12 - #define R_X86_64_NONE 0 /* No reloc */ 13 - #define R_X86_64_64 1 /* Direct 64 bit */ 14 - #define R_X86_64_PC32 2 /* PC relative 32 bit signed */ 15 - #define R_X86_64_GOT32 3 /* 32 bit GOT entry */ 16 - #define R_X86_64_PLT32 4 /* 32 bit PLT address */ 17 - #define R_X86_64_COPY 5 /* Copy symbol at runtime */ 18 - #define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ 19 - #define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ 20 - #define R_X86_64_RELATIVE 8 /* Adjust by program base */ 21 - #define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative 22 - offset to GOT */ 23 - #define R_X86_64_32 10 /* Direct 32 bit zero extended */ 24 - #define R_X86_64_32S 11 /* Direct 32 bit sign extended */ 25 - #define R_X86_64_16 12 /* Direct 16 bit zero extended */ 26 - #define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ 27 - #define R_X86_64_8 14 /* Direct 8 bit sign extended */ 28 - #define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ 29 - 30 - #define R_X86_64_NUM 16 31 - 32 - typedef unsigned long elf_greg_t; 33 - 34 - #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) 35 - typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 36 - 37 - typedef struct user_i387_struct elf_fpregset_t; 38 - 39 - /* 40 - * These are used to set parameters in the core dumps. 41 - */ 42 - #define ELF_CLASS ELFCLASS64 43 - #define ELF_DATA ELFDATA2LSB 44 - #define ELF_ARCH EM_X86_64 45 - 46 - #ifdef __KERNEL__ 47 - #include <asm/processor.h> 48 - 49 - /* 50 - * This is used to ensure we don't load something for the wrong architecture. 51 - */ 52 - #define elf_check_arch(x) \ 53 - ((x)->e_machine == EM_X86_64) 54 - 55 - 56 - /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx 57 - contains a pointer to a function which might be registered using `atexit'. 58 - This provides a mean for the dynamic linker to call DT_FINI functions for 59 - shared libraries that have been loaded before the code runs. 60 - 61 - A value of 0 tells we have no such handler. 62 - 63 - We might as well make sure everything else is cleared too (except for %esp), 64 - just to make things more deterministic. 65 - */ 66 - #define ELF_PLAT_INIT(_r, load_addr) do { \ 67 - struct task_struct *cur = current; \ 68 - (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \ 69 - (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \ 70 - (_r)->rax = 0; \ 71 - (_r)->r8 = 0; \ 72 - (_r)->r9 = 0; \ 73 - (_r)->r10 = 0; \ 74 - (_r)->r11 = 0; \ 75 - (_r)->r12 = 0; \ 76 - (_r)->r13 = 0; \ 77 - (_r)->r14 = 0; \ 78 - (_r)->r15 = 0; \ 79 - cur->thread.fs = 0; cur->thread.gs = 0; \ 80 - cur->thread.fsindex = 0; cur->thread.gsindex = 0; \ 81 - cur->thread.ds = 0; cur->thread.es = 0; \ 82 - clear_thread_flag(TIF_IA32); \ 83 - } while (0) 84 - 85 - #define USE_ELF_CORE_DUMP 86 - #define ELF_EXEC_PAGESIZE 4096 87 - 88 - /* This is the location that an ET_DYN program is loaded if exec'ed. Typical 89 - use of this is to invoke "./ld.so someprog" to test out a new version of 90 - the loader. We need to make sure that it is out of the way of the program 91 - that it will "exec", and that there is sufficient room for the brk. */ 92 - 93 - #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) 94 - 95 - /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is 96 - now struct_user_regs, they are different). 
Assumes current is the process 97 - getting dumped. */ 98 - 99 - #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ 100 - unsigned v; \ 101 - (pr_reg)[0] = (regs)->r15; \ 102 - (pr_reg)[1] = (regs)->r14; \ 103 - (pr_reg)[2] = (regs)->r13; \ 104 - (pr_reg)[3] = (regs)->r12; \ 105 - (pr_reg)[4] = (regs)->rbp; \ 106 - (pr_reg)[5] = (regs)->rbx; \ 107 - (pr_reg)[6] = (regs)->r11; \ 108 - (pr_reg)[7] = (regs)->r10; \ 109 - (pr_reg)[8] = (regs)->r9; \ 110 - (pr_reg)[9] = (regs)->r8; \ 111 - (pr_reg)[10] = (regs)->rax; \ 112 - (pr_reg)[11] = (regs)->rcx; \ 113 - (pr_reg)[12] = (regs)->rdx; \ 114 - (pr_reg)[13] = (regs)->rsi; \ 115 - (pr_reg)[14] = (regs)->rdi; \ 116 - (pr_reg)[15] = (regs)->orig_rax; \ 117 - (pr_reg)[16] = (regs)->rip; \ 118 - (pr_reg)[17] = (regs)->cs; \ 119 - (pr_reg)[18] = (regs)->eflags; \ 120 - (pr_reg)[19] = (regs)->rsp; \ 121 - (pr_reg)[20] = (regs)->ss; \ 122 - (pr_reg)[21] = current->thread.fs; \ 123 - (pr_reg)[22] = current->thread.gs; \ 124 - asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \ 125 - asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \ 126 - asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \ 127 - asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \ 128 - } while(0); 129 - 130 - /* This yields a mask that user programs can use to figure out what 131 - instruction set this CPU supports. This could be done in user space, 132 - but it's not easy, and we've already done it here. */ 133 - 134 - #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) 135 - 136 - /* This yields a string that ld.so will use to load implementation 137 - specific libraries for optimization. This is more specific in 138 - intent than poking at uname or /proc/cpuinfo. 139 - 140 - For the moment, we have only optimizations for the Intel generations, 141 - but that could change... */ 142 - 143 - /* I'm not sure if we can use '-' here */ 144 - #define ELF_PLATFORM ("x86_64") 145 - 146 - extern void set_personality_64bit(void); 147 - #define SET_PERSONALITY(ex, ibcs2) set_personality_64bit() 148 - /* 149 - * An executable for which elf_read_implies_exec() returns TRUE will 150 - * have the READ_IMPLIES_EXEC personality flag set automatically. 151 - */ 152 - #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) 153 - 154 - struct task_struct; 155 - 156 - extern int dump_task_regs (struct task_struct *, elf_gregset_t *); 157 - extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); 158 - 159 - #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) 160 - #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) 161 - 162 - /* 1GB for 64bit, 8MB for 32bit */ 163 - #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) 164 - 165 - 166 - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 167 - struct linux_binprm; 168 - extern int arch_setup_additional_pages(struct linux_binprm *bprm, 169 - int executable_stack); 170 - 171 - extern int vdso_enabled; 172 - 173 - #define ARCH_DLINFO \ 174 - do if (vdso_enabled) { \ 175 - NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ 176 - } while (0) 177 - 178 - #endif 179 - 180 - #endif
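The elf_check_arch()/ELF_ARCH pairs in the merged elf.h above are what keep a 32-bit kernel from loading x86-64 binaries and vice versa. For illustration, a user-space reader making the same e_machine comparison; EM_386 and EM_X86_64 come from the C library's <elf.h>, the default path is a placeholder, and an Elf32_Ehdr suffices because e_machine sits at the same offset in both ELF classes:

    #include <stdio.h>
    #include <elf.h>

    int main(int argc, char **argv)
    {
            Elf32_Ehdr eh;
            FILE *f = fopen(argc > 1 ? argv[1] : "/bin/true", "rb");

            if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
                    return 1;
            if (eh.e_machine == EM_386)
                    printf("i386 object\n");
            else if (eh.e_machine == EM_X86_64)
                    printf("x86-64 object\n");
            else
                    printf("e_machine = %u\n", eh.e_machine);
            fclose(f);
            return 0;
    }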
+22 -4
include/asm-x86/mmu.h
··· 1 - #ifdef CONFIG_X86_32 2 - # include "mmu_32.h" 3 - #else 4 - # include "mmu_64.h" 1 + #ifndef _ASM_X86_MMU_H 2 + #define _ASM_X86_MMU_H 3 + 4 + #include <linux/spinlock.h> 5 + #include <linux/mutex.h> 6 + 7 + /* 8 + * The x86 doesn't have an MMU context, but 9 + * we put the segment information here. 10 + * 11 + * cpu_vm_mask is used to optimize ldt flushing. 12 + */ 13 + typedef struct { 14 + void *ldt; 15 + #ifdef CONFIG_X86_64 16 + rwlock_t ldtlock; 5 17 #endif 18 + int size; 19 + struct mutex lock; 20 + void *vdso; 21 + } mm_context_t; 22 + 23 + #endif /* _ASM_X86_MMU_H */
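The merged mm_context_t is the pattern most of this series follows: a single definition with the 64-bit-only member guarded, instead of two parallel headers. A standalone sketch of that idiom (rwlock_t and struct mutex below are user-space stubs so the snippet compiles outside the kernel):

    #include <stdio.h>

    typedef int rwlock_t;           /* stub for the kernel type */
    struct mutex { int owner; };    /* likewise a stub */

    typedef struct {
            void *ldt;
    #ifdef CONFIG_X86_64
            rwlock_t ldtlock;       /* only the 64-bit build carries this */
    #endif
            int size;
            struct mutex lock;
            void *vdso;
    } mm_context_t;

    int main(void)
    {
            /* build with and without -DCONFIG_X86_64 to see both layouts */
            printf("sizeof(mm_context_t) = %zu\n", sizeof(mm_context_t));
            return 0;
    }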
-18
include/asm-x86/mmu_32.h
··· 1 - #ifndef __i386_MMU_H 2 - #define __i386_MMU_H 3 - 4 - #include <linux/mutex.h> 5 - /* 6 - * The i386 doesn't have a mmu context, but 7 - * we put the segment information here. 8 - * 9 - * cpu_vm_mask is used to optimize ldt flushing. 10 - */ 11 - typedef struct { 12 - int size; 13 - struct mutex lock; 14 - void *ldt; 15 - void *vdso; 16 - } mm_context_t; 17 - 18 - #endif
-21
include/asm-x86/mmu_64.h
··· 1 - #ifndef __x86_64_MMU_H 2 - #define __x86_64_MMU_H 3 - 4 - #include <linux/spinlock.h> 5 - #include <linux/mutex.h> 6 - 7 - /* 8 - * The x86_64 doesn't have a mmu context, but 9 - * we put the segment information here. 10 - * 11 - * cpu_vm_mask is used to optimize ldt flushing. 12 - */ 13 - typedef struct { 14 - void *ldt; 15 - rwlock_t ldtlock; 16 - int size; 17 - struct mutex lock; 18 - void *vdso; 19 - } mm_context_t; 20 - 21 - #endif
+38 -12
include/asm-x86/msgbuf.h
··· 1 - #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "msgbuf_32.h" 4 - # else 5 - # include "msgbuf_64.h" 6 - # endif 7 - #else 8 - # ifdef __i386__ 9 - # include "msgbuf_32.h" 10 - # else 11 - # include "msgbuf_64.h" 12 - # endif 1 + #ifndef _ASM_X86_MSGBUF_H 2 + #define _ASM_X86_MSGBUF_H 3 + 4 + /* 5 + * The msqid64_ds structure for the x86 architecture. 6 + * Note extra padding because this structure is passed back and forth 7 + * between kernel and user space. 8 + * 9 + * Pad space on i386 is left for: 10 + * - 64-bit time_t to solve y2038 problem 11 + * - 2 miscellaneous 32-bit values 12 + * 13 + * Pad space on x86-64 is left for: 14 + * - 2 miscellaneous 64-bit values 15 + */ 16 + struct msqid64_ds { 17 + struct ipc64_perm msg_perm; 18 + __kernel_time_t msg_stime; /* last msgsnd time */ 19 + #ifdef __i386__ 20 + unsigned long __unused1; 13 21 #endif 22 + __kernel_time_t msg_rtime; /* last msgrcv time */ 23 + #ifdef __i386__ 24 + unsigned long __unused2; 25 + #endif 26 + __kernel_time_t msg_ctime; /* last change time */ 27 + #ifdef __i386__ 28 + unsigned long __unused3; 29 + #endif 30 + unsigned long msg_cbytes; /* current number of bytes on queue */ 31 + unsigned long msg_qnum; /* number of messages in queue */ 32 + unsigned long msg_qbytes; /* max number of bytes on queue */ 33 + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ 34 + __kernel_pid_t msg_lrpid; /* last receive pid */ 35 + unsigned long __unused4; 36 + unsigned long __unused5; 37 + }; 38 + 39 + #endif /* _ASM_X86_MSGBUF_H */
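The padding comment can be sanity-checked: on i386 each 32-bit __kernel_time_t plus its adjacent __unusedN pad occupies the 8 bytes a future 64-bit time_t needs, which is why those pads exist only under __i386__. A user-space offsetof sketch with fixed-width stand-ins (the trimmed struct is hypothetical, not the real msqid64_ds):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct msq_times32 {            /* i386-style layout, trimmed */
            int32_t  msg_stime;     /* 32-bit time_t today... */
            uint32_t __unused1;     /* ...room for its upper half later */
            int32_t  msg_rtime;
            uint32_t __unused2;
    };

    int main(void)
    {
            /* prints 8: time field plus pad fill one 64-bit slot */
            printf("stime slot = %zu bytes\n",
                   offsetof(struct msq_times32, msg_rtime));
            return 0;
    }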
-31
include/asm-x86/msgbuf_32.h
··· 1 - #ifndef _I386_MSGBUF_H 2 - #define _I386_MSGBUF_H 3 - 4 - /* 5 - * The msqid64_ds structure for i386 architecture. 6 - * Note extra padding because this structure is passed back and forth 7 - * between kernel and user space. 8 - * 9 - * Pad space is left for: 10 - * - 64-bit time_t to solve y2038 problem 11 - * - 2 miscellaneous 32-bit values 12 - */ 13 - 14 - struct msqid64_ds { 15 - struct ipc64_perm msg_perm; 16 - __kernel_time_t msg_stime; /* last msgsnd time */ 17 - unsigned long __unused1; 18 - __kernel_time_t msg_rtime; /* last msgrcv time */ 19 - unsigned long __unused2; 20 - __kernel_time_t msg_ctime; /* last change time */ 21 - unsigned long __unused3; 22 - unsigned long msg_cbytes; /* current number of bytes on queue */ 23 - unsigned long msg_qnum; /* number of messages in queue */ 24 - unsigned long msg_qbytes; /* max number of bytes on queue */ 25 - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ 26 - __kernel_pid_t msg_lrpid; /* last receive pid */ 27 - unsigned long __unused4; 28 - unsigned long __unused5; 29 - }; 30 - 31 - #endif /* _I386_MSGBUF_H */
-27
include/asm-x86/msgbuf_64.h
··· 1 - #ifndef _X8664_MSGBUF_H 2 - #define _X8664_MSGBUF_H 3 - 4 - /* 5 - * The msqid64_ds structure for x86-64 architecture. 6 - * Note extra padding because this structure is passed back and forth 7 - * between kernel and user space. 8 - * 9 - * Pad space is left for: 10 - * - 2 miscellaneous 64-bit values 11 - */ 12 - 13 - struct msqid64_ds { 14 - struct ipc64_perm msg_perm; 15 - __kernel_time_t msg_stime; /* last msgsnd time */ 16 - __kernel_time_t msg_rtime; /* last msgrcv time */ 17 - __kernel_time_t msg_ctime; /* last change time */ 18 - unsigned long msg_cbytes; /* current number of bytes on queue */ 19 - unsigned long msg_qnum; /* number of messages in queue */ 20 - unsigned long msg_qbytes; /* max number of bytes on queue */ 21 - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ 22 - __kernel_pid_t msg_lrpid; /* last receive pid */ 23 - unsigned long __unused4; 24 - unsigned long __unused5; 25 - }; 26 - 27 - #endif
+347 -10
include/asm-x86/msr.h
··· 1 + #ifndef __ASM_X86_MSR_H_ 2 + #define __ASM_X86_MSR_H_ 3 + 4 + #include <asm/msr-index.h> 5 + 6 + #ifdef __i386__ 7 + 1 8 #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "msr_32.h" 4 - # else 5 - # include "msr_64.h" 6 - # endif 9 + #ifndef __ASSEMBLY__ 10 + 11 + #include <asm/errno.h> 12 + 13 + static inline unsigned long long native_read_msr(unsigned int msr) 14 + { 15 + unsigned long long val; 16 + 17 + asm volatile("rdmsr" : "=A" (val) : "c" (msr)); 18 + return val; 19 + } 20 + 21 + static inline unsigned long long native_read_msr_safe(unsigned int msr, 22 + int *err) 23 + { 24 + unsigned long long val; 25 + 26 + asm volatile("2: rdmsr ; xorl %0,%0\n" 27 + "1:\n\t" 28 + ".section .fixup,\"ax\"\n\t" 29 + "3: movl %3,%0 ; jmp 1b\n\t" 30 + ".previous\n\t" 31 + ".section __ex_table,\"a\"\n" 32 + " .align 4\n\t" 33 + " .long 2b,3b\n\t" 34 + ".previous" 35 + : "=r" (*err), "=A" (val) 36 + : "c" (msr), "i" (-EFAULT)); 37 + 38 + return val; 39 + } 40 + 41 + static inline void native_write_msr(unsigned int msr, unsigned long long val) 42 + { 43 + asm volatile("wrmsr" : : "c" (msr), "A"(val)); 44 + } 45 + 46 + static inline int native_write_msr_safe(unsigned int msr, 47 + unsigned long long val) 48 + { 49 + int err; 50 + asm volatile("2: wrmsr ; xorl %0,%0\n" 51 + "1:\n\t" 52 + ".section .fixup,\"ax\"\n\t" 53 + "3: movl %4,%0 ; jmp 1b\n\t" 54 + ".previous\n\t" 55 + ".section __ex_table,\"a\"\n" 56 + " .align 4\n\t" 57 + " .long 2b,3b\n\t" 58 + ".previous" 59 + : "=a" (err) 60 + : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)), 61 + "i" (-EFAULT)); 62 + return err; 63 + } 64 + 65 + static inline unsigned long long native_read_tsc(void) 66 + { 67 + unsigned long long val; 68 + asm volatile("rdtsc" : "=A" (val)); 69 + return val; 70 + } 71 + 72 + static inline unsigned long long native_read_pmc(void) 73 + { 74 + unsigned long long val; 75 + asm volatile("rdpmc" : "=A" (val)); 76 + return val; 77 + } 78 + 79 + #ifdef CONFIG_PARAVIRT 80 + #include <asm/paravirt.h> 7 81 #else 8 - # ifdef __i386__ 9 - # include "msr_32.h" 10 - # else 11 - # include "msr_64.h" 12 - # endif 82 + #include <linux/errno.h> 83 + /* 84 + * Access to machine-specific registers (available on 586 and better only) 85 + * Note: the rd* operations modify the parameters directly (without using 86 + * pointer indirection), this allows gcc to optimize better 87 + */ 88 + 89 + #define rdmsr(msr,val1,val2) \ 90 + do { \ 91 + u64 __val = native_read_msr(msr); \ 92 + (val1) = (u32)__val; \ 93 + (val2) = (u32)(__val >> 32); \ 94 + } while(0) 95 + 96 + static inline void wrmsr(u32 __msr, u32 __low, u32 __high) 97 + { 98 + native_write_msr(__msr, ((u64)__high << 32) | __low); 99 + } 100 + 101 + #define rdmsrl(msr,val) \ 102 + ((val) = native_read_msr(msr)) 103 + 104 + #define wrmsrl(msr,val) native_write_msr(msr, val) 105 + 106 + /* wrmsr with exception handling */ 107 + static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high) 108 + { 109 + return native_write_msr_safe(__msr, ((u64)__high << 32) | __low); 110 + } 111 + 112 + /* rdmsr with exception handling */ 113 + #define rdmsr_safe(msr,p1,p2) \ 114 + ({ \ 115 + int __err; \ 116 + u64 __val = native_read_msr_safe(msr, &__err); \ 117 + (*p1) = (u32)__val; \ 118 + (*p2) = (u32)(__val >> 32); \ 119 + __err; \ 120 + }) 121 + 122 + #define rdtscl(low) \ 123 + ((low) = (u32)native_read_tsc()) 124 + 125 + #define rdtscll(val) \ 126 + ((val) = native_read_tsc()) 127 + 128 + #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 129 + 130 + #define rdpmc(counter,low,high) \ 
131 + do { \ 132 + u64 _l = native_read_pmc(); \ 133 + (low) = (u32)_l; \ 134 + (high) = (u32)(_l >> 32); \ 135 + } while(0) 136 + #endif /* !CONFIG_PARAVIRT */ 137 + 138 + #ifdef CONFIG_SMP 139 + void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 140 + void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 141 + int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 142 + int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 143 + #else /* CONFIG_SMP */ 144 + static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 145 + { 146 + rdmsr(msr_no, *l, *h); 147 + } 148 + static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 149 + { 150 + wrmsr(msr_no, l, h); 151 + } 152 + static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 153 + { 154 + return rdmsr_safe(msr_no, l, h); 155 + } 156 + static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 157 + { 158 + return wrmsr_safe(msr_no, l, h); 159 + } 160 + #endif /* CONFIG_SMP */ 161 + #endif /* ! __ASSEMBLY__ */ 162 + #endif /* __KERNEL__ */ 163 + 164 + #else /* __i386__ */ 165 + 166 + #ifndef __ASSEMBLY__ 167 + #include <linux/errno.h> 168 + /* 169 + * Access to machine-specific registers (available on 586 and better only) 170 + * Note: the rd* operations modify the parameters directly (without using 171 + * pointer indirection), this allows gcc to optimize better 172 + */ 173 + 174 + #define rdmsr(msr,val1,val2) \ 175 + __asm__ __volatile__("rdmsr" \ 176 + : "=a" (val1), "=d" (val2) \ 177 + : "c" (msr)) 178 + 179 + 180 + #define rdmsrl(msr,val) do { unsigned long a__,b__; \ 181 + __asm__ __volatile__("rdmsr" \ 182 + : "=a" (a__), "=d" (b__) \ 183 + : "c" (msr)); \ 184 + val = a__ | (b__<<32); \ 185 + } while(0) 186 + 187 + #define wrmsr(msr,val1,val2) \ 188 + __asm__ __volatile__("wrmsr" \ 189 + : /* no outputs */ \ 190 + : "c" (msr), "a" (val1), "d" (val2)) 191 + 192 + #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) 193 + 194 + /* wrmsr with exception handling */ 195 + #define wrmsr_safe(msr,a,b) ({ int ret__; \ 196 + asm volatile("2: wrmsr ; xorl %0,%0\n" \ 197 + "1:\n\t" \ 198 + ".section .fixup,\"ax\"\n\t" \ 199 + "3: movl %4,%0 ; jmp 1b\n\t" \ 200 + ".previous\n\t" \ 201 + ".section __ex_table,\"a\"\n" \ 202 + " .align 8\n\t" \ 203 + " .quad 2b,3b\n\t" \ 204 + ".previous" \ 205 + : "=a" (ret__) \ 206 + : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \ 207 + ret__; }) 208 + 209 + #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) 210 + 211 + #define rdmsr_safe(msr,a,b) \ 212 + ({ int ret__; \ 213 + asm volatile ("1: rdmsr\n" \ 214 + "2:\n" \ 215 + ".section .fixup,\"ax\"\n" \ 216 + "3: movl %4,%0\n" \ 217 + " jmp 2b\n" \ 218 + ".previous\n" \ 219 + ".section __ex_table,\"a\"\n" \ 220 + " .align 8\n" \ 221 + " .quad 1b,3b\n" \ 222 + ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \ 223 + :"c"(msr), "i"(-EIO), "0"(0)); \ 224 + ret__; }) 225 + 226 + #define rdtsc(low,high) \ 227 + __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) 228 + 229 + #define rdtscl(low) \ 230 + __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") 231 + 232 + #define rdtscp(low,high,aux) \ 233 + asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux)) 234 + 235 + #define rdtscll(val) do { \ 236 + unsigned int __a,__d; \ 237 + asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ 238 + (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ 239 + } 
while(0) 240 + 241 + #define rdtscpll(val, aux) do { \ 242 + unsigned long __a, __d; \ 243 + asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \ 244 + (val) = (__d << 32) | __a; \ 245 + } while (0) 246 + 247 + #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 248 + 249 + #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) 250 + 251 + #define rdpmc(counter,low,high) \ 252 + __asm__ __volatile__("rdpmc" \ 253 + : "=a" (low), "=d" (high) \ 254 + : "c" (counter)) 255 + 256 + static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, 257 + unsigned int *ecx, unsigned int *edx) 258 + { 259 + __asm__("cpuid" 260 + : "=a" (*eax), 261 + "=b" (*ebx), 262 + "=c" (*ecx), 263 + "=d" (*edx) 264 + : "0" (op)); 265 + } 266 + 267 + /* Some CPUID calls want 'count' to be placed in ecx */ 268 + static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, 269 + int *edx) 270 + { 271 + __asm__("cpuid" 272 + : "=a" (*eax), 273 + "=b" (*ebx), 274 + "=c" (*ecx), 275 + "=d" (*edx) 276 + : "0" (op), "c" (count)); 277 + } 278 + 279 + /* 280 + * CPUID functions returning a single datum 281 + */ 282 + static inline unsigned int cpuid_eax(unsigned int op) 283 + { 284 + unsigned int eax; 285 + 286 + __asm__("cpuid" 287 + : "=a" (eax) 288 + : "0" (op) 289 + : "bx", "cx", "dx"); 290 + return eax; 291 + } 292 + static inline unsigned int cpuid_ebx(unsigned int op) 293 + { 294 + unsigned int eax, ebx; 295 + 296 + __asm__("cpuid" 297 + : "=a" (eax), "=b" (ebx) 298 + : "0" (op) 299 + : "cx", "dx" ); 300 + return ebx; 301 + } 302 + static inline unsigned int cpuid_ecx(unsigned int op) 303 + { 304 + unsigned int eax, ecx; 305 + 306 + __asm__("cpuid" 307 + : "=a" (eax), "=c" (ecx) 308 + : "0" (op) 309 + : "bx", "dx" ); 310 + return ecx; 311 + } 312 + static inline unsigned int cpuid_edx(unsigned int op) 313 + { 314 + unsigned int eax, edx; 315 + 316 + __asm__("cpuid" 317 + : "=a" (eax), "=d" (edx) 318 + : "0" (op) 319 + : "bx", "cx"); 320 + return edx; 321 + } 322 + 323 + #ifdef CONFIG_SMP 324 + void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 325 + void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 326 + int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 327 + int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 328 + #else /* CONFIG_SMP */ 329 + static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 330 + { 331 + rdmsr(msr_no, *l, *h); 332 + } 333 + static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 334 + { 335 + wrmsr(msr_no, l, h); 336 + } 337 + static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 338 + { 339 + return rdmsr_safe(msr_no, l, h); 340 + } 341 + static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 342 + { 343 + return wrmsr_safe(msr_no, l, h); 344 + } 345 + #endif /* CONFIG_SMP */ 346 + #endif /* __ASSEMBLY__ */ 347 + 348 + #endif /* !__i386__ */ 349 + 13 350 #endif
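The rdmsr/wrmsr/rdtsc wrappers above are kernel-only, but the same registers are reachable from user space through the msr character device (CONFIG_X86_MSR), where the file offset selects the MSR index. A minimal root-only sketch reading MSR 0x10, the TSC:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* a pread() at offset 0x10 performs rdmsr for MSR 0x10 on CPU 0 */
            if (fd < 0 || pread(fd, &val, sizeof(val), 0x10) != sizeof(val)) {
                    perror("msr");
                    return 1;
            }
            printf("TSC = %#llx\n", (unsigned long long)val);
            close(fd);
            return 0;
    }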
-161
include/asm-x86/msr_32.h
··· 1 - #ifndef __ASM_MSR_H 2 - #define __ASM_MSR_H 3 - 4 - #include <asm/msr-index.h> 5 - 6 - #ifdef __KERNEL__ 7 - #ifndef __ASSEMBLY__ 8 - 9 - #include <asm/errno.h> 10 - 11 - static inline unsigned long long native_read_msr(unsigned int msr) 12 - { 13 - unsigned long long val; 14 - 15 - asm volatile("rdmsr" : "=A" (val) : "c" (msr)); 16 - return val; 17 - } 18 - 19 - static inline unsigned long long native_read_msr_safe(unsigned int msr, 20 - int *err) 21 - { 22 - unsigned long long val; 23 - 24 - asm volatile("2: rdmsr ; xorl %0,%0\n" 25 - "1:\n\t" 26 - ".section .fixup,\"ax\"\n\t" 27 - "3: movl %3,%0 ; jmp 1b\n\t" 28 - ".previous\n\t" 29 - ".section __ex_table,\"a\"\n" 30 - " .align 4\n\t" 31 - " .long 2b,3b\n\t" 32 - ".previous" 33 - : "=r" (*err), "=A" (val) 34 - : "c" (msr), "i" (-EFAULT)); 35 - 36 - return val; 37 - } 38 - 39 - static inline void native_write_msr(unsigned int msr, unsigned long long val) 40 - { 41 - asm volatile("wrmsr" : : "c" (msr), "A"(val)); 42 - } 43 - 44 - static inline int native_write_msr_safe(unsigned int msr, 45 - unsigned long long val) 46 - { 47 - int err; 48 - asm volatile("2: wrmsr ; xorl %0,%0\n" 49 - "1:\n\t" 50 - ".section .fixup,\"ax\"\n\t" 51 - "3: movl %4,%0 ; jmp 1b\n\t" 52 - ".previous\n\t" 53 - ".section __ex_table,\"a\"\n" 54 - " .align 4\n\t" 55 - " .long 2b,3b\n\t" 56 - ".previous" 57 - : "=a" (err) 58 - : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)), 59 - "i" (-EFAULT)); 60 - return err; 61 - } 62 - 63 - static inline unsigned long long native_read_tsc(void) 64 - { 65 - unsigned long long val; 66 - asm volatile("rdtsc" : "=A" (val)); 67 - return val; 68 - } 69 - 70 - static inline unsigned long long native_read_pmc(void) 71 - { 72 - unsigned long long val; 73 - asm volatile("rdpmc" : "=A" (val)); 74 - return val; 75 - } 76 - 77 - #ifdef CONFIG_PARAVIRT 78 - #include <asm/paravirt.h> 79 - #else 80 - #include <linux/errno.h> 81 - /* 82 - * Access to machine-specific registers (available on 586 and better only) 83 - * Note: the rd* operations modify the parameters directly (without using 84 - * pointer indirection), this allows gcc to optimize better 85 - */ 86 - 87 - #define rdmsr(msr,val1,val2) \ 88 - do { \ 89 - u64 __val = native_read_msr(msr); \ 90 - (val1) = (u32)__val; \ 91 - (val2) = (u32)(__val >> 32); \ 92 - } while(0) 93 - 94 - static inline void wrmsr(u32 __msr, u32 __low, u32 __high) 95 - { 96 - native_write_msr(__msr, ((u64)__high << 32) | __low); 97 - } 98 - 99 - #define rdmsrl(msr,val) \ 100 - ((val) = native_read_msr(msr)) 101 - 102 - #define wrmsrl(msr,val) native_write_msr(msr, val) 103 - 104 - /* wrmsr with exception handling */ 105 - static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high) 106 - { 107 - return native_write_msr_safe(__msr, ((u64)__high << 32) | __low); 108 - } 109 - 110 - /* rdmsr with exception handling */ 111 - #define rdmsr_safe(msr,p1,p2) \ 112 - ({ \ 113 - int __err; \ 114 - u64 __val = native_read_msr_safe(msr, &__err); \ 115 - (*p1) = (u32)__val; \ 116 - (*p2) = (u32)(__val >> 32); \ 117 - __err; \ 118 - }) 119 - 120 - #define rdtscl(low) \ 121 - ((low) = (u32)native_read_tsc()) 122 - 123 - #define rdtscll(val) \ 124 - ((val) = native_read_tsc()) 125 - 126 - #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 127 - 128 - #define rdpmc(counter,low,high) \ 129 - do { \ 130 - u64 _l = native_read_pmc(); \ 131 - (low) = (u32)_l; \ 132 - (high) = (u32)(_l >> 32); \ 133 - } while(0) 134 - #endif /* !CONFIG_PARAVIRT */ 135 - 136 - #ifdef CONFIG_SMP 137 - void rdmsr_on_cpu(unsigned int cpu, u32 
msr_no, u32 *l, u32 *h); 138 - void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 139 - int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 140 - int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 141 - #else /* CONFIG_SMP */ 142 - static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 143 - { 144 - rdmsr(msr_no, *l, *h); 145 - } 146 - static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 147 - { 148 - wrmsr(msr_no, l, h); 149 - } 150 - static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 151 - { 152 - return rdmsr_safe(msr_no, l, h); 153 - } 154 - static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 155 - { 156 - return wrmsr_safe(msr_no, l, h); 157 - } 158 - #endif /* CONFIG_SMP */ 159 - #endif 160 - #endif 161 - #endif /* __ASM_MSR_H */
-187
include/asm-x86/msr_64.h
··· 1 - #ifndef X86_64_MSR_H 2 - #define X86_64_MSR_H 1 3 - 4 - #include <asm/msr-index.h> 5 - 6 - #ifndef __ASSEMBLY__ 7 - #include <linux/errno.h> 8 - /* 9 - * Access to machine-specific registers (available on 586 and better only) 10 - * Note: the rd* operations modify the parameters directly (without using 11 - * pointer indirection), this allows gcc to optimize better 12 - */ 13 - 14 - #define rdmsr(msr,val1,val2) \ 15 - __asm__ __volatile__("rdmsr" \ 16 - : "=a" (val1), "=d" (val2) \ 17 - : "c" (msr)) 18 - 19 - 20 - #define rdmsrl(msr,val) do { unsigned long a__,b__; \ 21 - __asm__ __volatile__("rdmsr" \ 22 - : "=a" (a__), "=d" (b__) \ 23 - : "c" (msr)); \ 24 - val = a__ | (b__<<32); \ 25 - } while(0) 26 - 27 - #define wrmsr(msr,val1,val2) \ 28 - __asm__ __volatile__("wrmsr" \ 29 - : /* no outputs */ \ 30 - : "c" (msr), "a" (val1), "d" (val2)) 31 - 32 - #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) 33 - 34 - /* wrmsr with exception handling */ 35 - #define wrmsr_safe(msr,a,b) ({ int ret__; \ 36 - asm volatile("2: wrmsr ; xorl %0,%0\n" \ 37 - "1:\n\t" \ 38 - ".section .fixup,\"ax\"\n\t" \ 39 - "3: movl %4,%0 ; jmp 1b\n\t" \ 40 - ".previous\n\t" \ 41 - ".section __ex_table,\"a\"\n" \ 42 - " .align 8\n\t" \ 43 - " .quad 2b,3b\n\t" \ 44 - ".previous" \ 45 - : "=a" (ret__) \ 46 - : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \ 47 - ret__; }) 48 - 49 - #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) 50 - 51 - #define rdmsr_safe(msr,a,b) \ 52 - ({ int ret__; \ 53 - asm volatile ("1: rdmsr\n" \ 54 - "2:\n" \ 55 - ".section .fixup,\"ax\"\n" \ 56 - "3: movl %4,%0\n" \ 57 - " jmp 2b\n" \ 58 - ".previous\n" \ 59 - ".section __ex_table,\"a\"\n" \ 60 - " .align 8\n" \ 61 - " .quad 1b,3b\n" \ 62 - ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\ 63 - :"c"(msr), "i"(-EIO), "0"(0)); \ 64 - ret__; }) 65 - 66 - #define rdtsc(low,high) \ 67 - __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) 68 - 69 - #define rdtscl(low) \ 70 - __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") 71 - 72 - #define rdtscp(low,high,aux) \ 73 - asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux)) 74 - 75 - #define rdtscll(val) do { \ 76 - unsigned int __a,__d; \ 77 - asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ 78 - (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ 79 - } while(0) 80 - 81 - #define rdtscpll(val, aux) do { \ 82 - unsigned long __a, __d; \ 83 - asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \ 84 - (val) = (__d << 32) | __a; \ 85 - } while (0) 86 - 87 - #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) 88 - 89 - #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) 90 - 91 - #define rdpmc(counter,low,high) \ 92 - __asm__ __volatile__("rdpmc" \ 93 - : "=a" (low), "=d" (high) \ 94 - : "c" (counter)) 95 - 96 - static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, 97 - unsigned int *ecx, unsigned int *edx) 98 - { 99 - __asm__("cpuid" 100 - : "=a" (*eax), 101 - "=b" (*ebx), 102 - "=c" (*ecx), 103 - "=d" (*edx) 104 - : "0" (op)); 105 - } 106 - 107 - /* Some CPUID calls want 'count' to be placed in ecx */ 108 - static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, 109 - int *edx) 110 - { 111 - __asm__("cpuid" 112 - : "=a" (*eax), 113 - "=b" (*ebx), 114 - "=c" (*ecx), 115 - "=d" (*edx) 116 - : "0" (op), "c" (count)); 117 - } 118 - 119 - /* 120 - * CPUID functions returning a single datum 121 - */ 122 - static inline unsigned int 
cpuid_eax(unsigned int op) 123 - { 124 - unsigned int eax; 125 - 126 - __asm__("cpuid" 127 - : "=a" (eax) 128 - : "0" (op) 129 - : "bx", "cx", "dx"); 130 - return eax; 131 - } 132 - static inline unsigned int cpuid_ebx(unsigned int op) 133 - { 134 - unsigned int eax, ebx; 135 - 136 - __asm__("cpuid" 137 - : "=a" (eax), "=b" (ebx) 138 - : "0" (op) 139 - : "cx", "dx" ); 140 - return ebx; 141 - } 142 - static inline unsigned int cpuid_ecx(unsigned int op) 143 - { 144 - unsigned int eax, ecx; 145 - 146 - __asm__("cpuid" 147 - : "=a" (eax), "=c" (ecx) 148 - : "0" (op) 149 - : "bx", "dx" ); 150 - return ecx; 151 - } 152 - static inline unsigned int cpuid_edx(unsigned int op) 153 - { 154 - unsigned int eax, edx; 155 - 156 - __asm__("cpuid" 157 - : "=a" (eax), "=d" (edx) 158 - : "0" (op) 159 - : "bx", "cx"); 160 - return edx; 161 - } 162 - 163 - #ifdef CONFIG_SMP 164 - void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 165 - void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 166 - int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 167 - int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 168 - #else /* CONFIG_SMP */ 169 - static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 170 - { 171 - rdmsr(msr_no, *l, *h); 172 - } 173 - static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 174 - { 175 - wrmsr(msr_no, l, h); 176 - } 177 - static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 178 - { 179 - return rdmsr_safe(msr_no, l, h); 180 - } 181 - static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 182 - { 183 - return wrmsr_safe(msr_no, l, h); 184 - } 185 - #endif /* CONFIG_SMP */ 186 - #endif /* __ASSEMBLY__ */ 187 - #endif /* X86_64_MSR_H */
+163 -12
include/asm-x86/mtrr.h
··· 1 + /* Generic MTRR (Memory Type Range Register) ioctls. 2 + 3 + Copyright (C) 1997-1999 Richard Gooch 4 + 5 + This library is free software; you can redistribute it and/or 6 + modify it under the terms of the GNU Library General Public 7 + License as published by the Free Software Foundation; either 8 + version 2 of the License, or (at your option) any later version. 9 + 10 + This library is distributed in the hope that it will be useful, 11 + but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 + Library General Public License for more details. 14 + 15 + You should have received a copy of the GNU Library General Public 16 + License along with this library; if not, write to the Free 17 + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 + 19 + Richard Gooch may be reached by email at rgooch@atnf.csiro.au 20 + The postal address is: 21 + Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 22 + */ 23 + #ifndef _ASM_X86_MTRR_H 24 + #define _ASM_X86_MTRR_H 25 + 26 + #include <linux/ioctl.h> 27 + #include <linux/errno.h> 28 + 29 + #define MTRR_IOCTL_BASE 'M' 30 + 31 + struct mtrr_sentry 32 + { 33 + unsigned long base; /* Base address */ 34 + unsigned int size; /* Size of region */ 35 + unsigned int type; /* Type of region */ 36 + }; 37 + 38 + /* Warning: this structure has a different order from i386 39 + on x86-64. The 32bit emulation code takes care of that. 40 + But you need to use this for 64bit, otherwise your X server 41 + will break. */ 42 + 43 + #ifdef __i386__ 44 + struct mtrr_gentry 45 + { 46 + unsigned int regnum; /* Register number */ 47 + unsigned long base; /* Base address */ 48 + unsigned int size; /* Size of region */ 49 + unsigned int type; /* Type of region */ 50 + }; 51 + 52 + #else /* __i386__ */ 53 + 54 + struct mtrr_gentry 55 + { 56 + unsigned long base; /* Base address */ 57 + unsigned int size; /* Size of region */ 58 + unsigned int regnum; /* Register number */ 59 + unsigned int type; /* Type of region */ 60 + }; 61 + #endif /* !__i386__ */ 62 + 63 + /* These are the various ioctls */ 64 + #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) 65 + #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) 66 + #define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) 67 + #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) 68 + #define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) 69 + #define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) 70 + #define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) 71 + #define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) 72 + #define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) 73 + #define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) 74 + 75 + /* These are the region types */ 76 + #define MTRR_TYPE_UNCACHABLE 0 77 + #define MTRR_TYPE_WRCOMB 1 78 + /*#define MTRR_TYPE_ 2*/ 79 + /*#define MTRR_TYPE_ 3*/ 80 + #define MTRR_TYPE_WRTHROUGH 4 81 + #define MTRR_TYPE_WRPROT 5 82 + #define MTRR_TYPE_WRBACK 6 83 + #define MTRR_NUM_TYPES 7 84 + 1 85 #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "mtrr_32.h" 4 - # else 5 - # include "mtrr_64.h" 6 - # endif 7 - #else 8 - # ifdef __i386__ 9 - # include "mtrr_32.h" 10 - # else 11 - # include "mtrr_64.h" 12 - # endif 13 - #endif 86 + 87 + /* The following functions are for 
use by other drivers */ 88 + # ifdef CONFIG_MTRR 89 + extern void mtrr_save_fixed_ranges(void *); 90 + extern void mtrr_save_state(void); 91 + extern int mtrr_add (unsigned long base, unsigned long size, 92 + unsigned int type, char increment); 93 + extern int mtrr_add_page (unsigned long base, unsigned long size, 94 + unsigned int type, char increment); 95 + extern int mtrr_del (int reg, unsigned long base, unsigned long size); 96 + extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); 97 + extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); 98 + extern void mtrr_ap_init(void); 99 + extern void mtrr_bp_init(void); 100 + # else 101 + #define mtrr_save_fixed_ranges(arg) do {} while (0) 102 + #define mtrr_save_state() do {} while (0) 103 + static __inline__ int mtrr_add (unsigned long base, unsigned long size, 104 + unsigned int type, char increment) 105 + { 106 + return -ENODEV; 107 + } 108 + static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, 109 + unsigned int type, char increment) 110 + { 111 + return -ENODEV; 112 + } 113 + static __inline__ int mtrr_del (int reg, unsigned long base, 114 + unsigned long size) 115 + { 116 + return -ENODEV; 117 + } 118 + static __inline__ int mtrr_del_page (int reg, unsigned long base, 119 + unsigned long size) 120 + { 121 + return -ENODEV; 122 + } 123 + 124 + static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} 125 + 126 + #define mtrr_ap_init() do {} while (0) 127 + #define mtrr_bp_init() do {} while (0) 128 + # endif 129 + 130 + #ifdef CONFIG_COMPAT 131 + #include <linux/compat.h> 132 + 133 + struct mtrr_sentry32 134 + { 135 + compat_ulong_t base; /* Base address */ 136 + compat_uint_t size; /* Size of region */ 137 + compat_uint_t type; /* Type of region */ 138 + }; 139 + 140 + struct mtrr_gentry32 141 + { 142 + compat_ulong_t regnum; /* Register number */ 143 + compat_uint_t base; /* Base address */ 144 + compat_uint_t size; /* Size of region */ 145 + compat_uint_t type; /* Type of region */ 146 + }; 147 + 148 + #define MTRR_IOCTL_BASE 'M' 149 + 150 + #define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) 151 + #define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) 152 + #define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) 153 + #define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) 154 + #define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) 155 + #define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) 156 + #define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) 157 + #define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) 158 + #define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) 159 + #define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) 160 + #endif /* CONFIG_COMPAT */ 161 + 162 + #endif /* __KERNEL__ */ 163 + 164 + #endif /* _ASM_X86_MTRR_H */
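Since mtrr.h is exported to user space, the ioctls above are normally exercised through /proc/mtrr. A hedged sketch requesting a write-combining range (root only; the base and size are placeholders for some frame buffer, not values to replay blindly on real hardware):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/mtrr.h>

    int main(void)
    {
            struct mtrr_sentry s = {
                    .base = 0xf8000000UL,   /* placeholder region start */
                    .size = 0x400000,       /* 4 MB */
                    .type = MTRR_TYPE_WRCOMB,
            };
            int fd = open("/proc/mtrr", O_WRONLY);

            if (fd < 0 || ioctl(fd, MTRRIOC_ADD_ENTRY, &s) < 0) {
                    perror("mtrr");
                    return 1;
            }
            close(fd);
            return 0;
    }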
-115
include/asm-x86/mtrr_32.h
··· 1 - /* Generic MTRR (Memory Type Range Register) ioctls. 2 - 3 - Copyright (C) 1997-1999 Richard Gooch 4 - 5 - This library is free software; you can redistribute it and/or 6 - modify it under the terms of the GNU Library General Public 7 - License as published by the Free Software Foundation; either 8 - version 2 of the License, or (at your option) any later version. 9 - 10 - This library is distributed in the hope that it will be useful, 11 - but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 - Library General Public License for more details. 14 - 15 - You should have received a copy of the GNU Library General Public 16 - License along with this library; if not, write to the Free 17 - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 - 19 - Richard Gooch may be reached by email at rgooch@atnf.csiro.au 20 - The postal address is: 21 - Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 22 - */ 23 - #ifndef _LINUX_MTRR_H 24 - #define _LINUX_MTRR_H 25 - 26 - #include <linux/ioctl.h> 27 - #include <linux/errno.h> 28 - 29 - #define MTRR_IOCTL_BASE 'M' 30 - 31 - struct mtrr_sentry 32 - { 33 - unsigned long base; /* Base address */ 34 - unsigned int size; /* Size of region */ 35 - unsigned int type; /* Type of region */ 36 - }; 37 - 38 - struct mtrr_gentry 39 - { 40 - unsigned int regnum; /* Register number */ 41 - unsigned long base; /* Base address */ 42 - unsigned int size; /* Size of region */ 43 - unsigned int type; /* Type of region */ 44 - }; 45 - 46 - /* These are the various ioctls */ 47 - #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) 48 - #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) 49 - #define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) 50 - #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) 51 - #define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) 52 - #define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) 53 - #define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) 54 - #define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) 55 - #define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) 56 - #define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) 57 - 58 - /* These are the region types */ 59 - #define MTRR_TYPE_UNCACHABLE 0 60 - #define MTRR_TYPE_WRCOMB 1 61 - /*#define MTRR_TYPE_ 2*/ 62 - /*#define MTRR_TYPE_ 3*/ 63 - #define MTRR_TYPE_WRTHROUGH 4 64 - #define MTRR_TYPE_WRPROT 5 65 - #define MTRR_TYPE_WRBACK 6 66 - #define MTRR_NUM_TYPES 7 67 - 68 - #ifdef __KERNEL__ 69 - 70 - /* The following functions are for use by other drivers */ 71 - # ifdef CONFIG_MTRR 72 - extern void mtrr_save_fixed_ranges(void *); 73 - extern void mtrr_save_state(void); 74 - extern int mtrr_add (unsigned long base, unsigned long size, 75 - unsigned int type, char increment); 76 - extern int mtrr_add_page (unsigned long base, unsigned long size, 77 - unsigned int type, char increment); 78 - extern int mtrr_del (int reg, unsigned long base, unsigned long size); 79 - extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); 80 - extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); 81 - extern void mtrr_ap_init(void); 82 - extern void mtrr_bp_init(void); 83 - # else 84 - #define mtrr_save_fixed_ranges(arg) do {} while (0) 85 - #define 
mtrr_save_state() do {} while (0) 86 - static __inline__ int mtrr_add (unsigned long base, unsigned long size, 87 - unsigned int type, char increment) 88 - { 89 - return -ENODEV; 90 - } 91 - static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, 92 - unsigned int type, char increment) 93 - { 94 - return -ENODEV; 95 - } 96 - static __inline__ int mtrr_del (int reg, unsigned long base, 97 - unsigned long size) 98 - { 99 - return -ENODEV; 100 - } 101 - static __inline__ int mtrr_del_page (int reg, unsigned long base, 102 - unsigned long size) 103 - { 104 - return -ENODEV; 105 - } 106 - 107 - static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} 108 - 109 - #define mtrr_ap_init() do {} while (0) 110 - #define mtrr_bp_init() do {} while (0) 111 - # endif 112 - 113 - #endif 114 - 115 - #endif /* _LINUX_MTRR_H */
-152
include/asm-x86/mtrr_64.h
··· 1 - /* Generic MTRR (Memory Type Range Register) ioctls. 2 - 3 - Copyright (C) 1997-1999 Richard Gooch 4 - 5 - This library is free software; you can redistribute it and/or 6 - modify it under the terms of the GNU Library General Public 7 - License as published by the Free Software Foundation; either 8 - version 2 of the License, or (at your option) any later version. 9 - 10 - This library is distributed in the hope that it will be useful, 11 - but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 - Library General Public License for more details. 14 - 15 - You should have received a copy of the GNU Library General Public 16 - License along with this library; if not, write to the Free 17 - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 - 19 - Richard Gooch may be reached by email at rgooch@atnf.csiro.au 20 - The postal address is: 21 - Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 22 - */ 23 - #ifndef _LINUX_MTRR_H 24 - #define _LINUX_MTRR_H 25 - 26 - #include <linux/ioctl.h> 27 - 28 - #define MTRR_IOCTL_BASE 'M' 29 - 30 - struct mtrr_sentry 31 - { 32 - unsigned long base; /* Base address */ 33 - unsigned int size; /* Size of region */ 34 - unsigned int type; /* Type of region */ 35 - }; 36 - 37 - /* Warning: this structure has a different order from i386 38 - on x86-64. The 32bit emulation code takes care of that. 39 - But you need to use this for 64bit, otherwise your X server 40 - will break. */ 41 - struct mtrr_gentry 42 - { 43 - unsigned long base; /* Base address */ 44 - unsigned int size; /* Size of region */ 45 - unsigned int regnum; /* Register number */ 46 - unsigned int type; /* Type of region */ 47 - }; 48 - 49 - /* These are the various ioctls */ 50 - #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) 51 - #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) 52 - #define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) 53 - #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) 54 - #define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) 55 - #define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) 56 - #define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) 57 - #define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) 58 - #define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) 59 - #define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) 60 - 61 - /* These are the region types */ 62 - #define MTRR_TYPE_UNCACHABLE 0 63 - #define MTRR_TYPE_WRCOMB 1 64 - /*#define MTRR_TYPE_ 2*/ 65 - /*#define MTRR_TYPE_ 3*/ 66 - #define MTRR_TYPE_WRTHROUGH 4 67 - #define MTRR_TYPE_WRPROT 5 68 - #define MTRR_TYPE_WRBACK 6 69 - #define MTRR_NUM_TYPES 7 70 - 71 - #ifdef __KERNEL__ 72 - 73 - /* The following functions are for use by other drivers */ 74 - # ifdef CONFIG_MTRR 75 - extern int mtrr_add (unsigned long base, unsigned long size, 76 - unsigned int type, char increment); 77 - extern int mtrr_add_page (unsigned long base, unsigned long size, 78 - unsigned int type, char increment); 79 - extern int mtrr_del (int reg, unsigned long base, unsigned long size); 80 - extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); 81 - # else 82 - static __inline__ int mtrr_add (unsigned long base, unsigned long size, 83 - unsigned int type, char increment) 84 - { 85 - 
return -ENODEV; 86 - } 87 - static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, 88 - unsigned int type, char increment) 89 - { 90 - return -ENODEV; 91 - } 92 - static __inline__ int mtrr_del (int reg, unsigned long base, 93 - unsigned long size) 94 - { 95 - return -ENODEV; 96 - } 97 - static __inline__ int mtrr_del_page (int reg, unsigned long base, 98 - unsigned long size) 99 - { 100 - return -ENODEV; 101 - } 102 - 103 - #endif /* CONFIG_MTRR */ 104 - 105 - #ifdef CONFIG_COMPAT 106 - #include <linux/compat.h> 107 - 108 - struct mtrr_sentry32 109 - { 110 - compat_ulong_t base; /* Base address */ 111 - compat_uint_t size; /* Size of region */ 112 - compat_uint_t type; /* Type of region */ 113 - }; 114 - 115 - struct mtrr_gentry32 116 - { 117 - compat_ulong_t regnum; /* Register number */ 118 - compat_uint_t base; /* Base address */ 119 - compat_uint_t size; /* Size of region */ 120 - compat_uint_t type; /* Type of region */ 121 - }; 122 - 123 - #define MTRR_IOCTL_BASE 'M' 124 - 125 - #define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) 126 - #define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) 127 - #define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) 128 - #define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) 129 - #define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) 130 - #define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) 131 - #define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) 132 - #define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) 133 - #define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) 134 - #define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) 135 - 136 - #endif /* CONFIG_COMPAT */ 137 - 138 - #ifdef CONFIG_MTRR 139 - extern void mtrr_ap_init(void); 140 - extern void mtrr_bp_init(void); 141 - extern void mtrr_save_fixed_ranges(void *); 142 - extern void mtrr_save_state(void); 143 - #else 144 - #define mtrr_ap_init() do {} while (0) 145 - #define mtrr_bp_init() do {} while (0) 146 - #define mtrr_save_fixed_ranges(arg) do {} while (0) 147 - #define mtrr_save_state() do {} while (0) 148 - #endif 149 - 150 - #endif /* __KERNEL__ */ 151 - 152 - #endif /* _LINUX_MTRR_H */
+140 -11
include/asm-x86/ptrace.h
··· 1 + #ifndef _ASM_X86_PTRACE_H 2 + #define _ASM_X86_PTRACE_H 3 + 4 + #include <linux/compiler.h> /* For __user */ 5 + #include <asm/ptrace-abi.h> 6 + 7 + #ifndef __ASSEMBLY__ 8 + 9 + #ifdef __i386__ 10 + /* this struct defines the way the registers are stored on the 11 + stack during a system call. */ 12 + 13 + struct pt_regs { 14 + long ebx; 15 + long ecx; 16 + long edx; 17 + long esi; 18 + long edi; 19 + long ebp; 20 + long eax; 21 + int xds; 22 + int xes; 23 + int xfs; 24 + /* int xgs; */ 25 + long orig_eax; 26 + long eip; 27 + int xcs; 28 + long eflags; 29 + long esp; 30 + int xss; 31 + }; 32 + 1 33 #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "ptrace_32.h" 4 - # else 5 - # include "ptrace_64.h" 6 - # endif 7 - #else 8 - # ifdef __i386__ 9 - # include "ptrace_32.h" 10 - # else 11 - # include "ptrace_64.h" 12 - # endif 34 + 35 + #include <asm/vm86.h> 36 + #include <asm/segment.h> 37 + 38 + struct task_struct; 39 + extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); 40 + 41 + /* 42 + * user_mode_vm(regs) determines whether a register set came from user mode. 43 + * This is true if V8086 mode was enabled OR if the register set was from 44 + * protected mode with RPL-3 CS value. This tricky test checks that with 45 + * one comparison. Many places in the kernel can bypass this full check 46 + * if they have already ruled out V8086 mode, so user_mode(regs) can be used. 47 + */ 48 + static inline int user_mode(struct pt_regs *regs) 49 + { 50 + return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL; 51 + } 52 + static inline int user_mode_vm(struct pt_regs *regs) 53 + { 54 + return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL; 55 + } 56 + static inline int v8086_mode(struct pt_regs *regs) 57 + { 58 + return (regs->eflags & VM_MASK); 59 + } 60 + 61 + #define instruction_pointer(regs) ((regs)->eip) 62 + #define frame_pointer(regs) ((regs)->ebp) 63 + #define stack_pointer(regs) ((regs)->esp) 64 + #define regs_return_value(regs) ((regs)->eax) 65 + 66 + extern unsigned long profile_pc(struct pt_regs *regs); 67 + #endif /* __KERNEL__ */ 68 + 69 + #else /* __i386__ */ 70 + 71 + struct pt_regs { 72 + unsigned long r15; 73 + unsigned long r14; 74 + unsigned long r13; 75 + unsigned long r12; 76 + unsigned long rbp; 77 + unsigned long rbx; 78 + /* arguments: non interrupts/non tracing syscalls only save upto here*/ 79 + unsigned long r11; 80 + unsigned long r10; 81 + unsigned long r9; 82 + unsigned long r8; 83 + unsigned long rax; 84 + unsigned long rcx; 85 + unsigned long rdx; 86 + unsigned long rsi; 87 + unsigned long rdi; 88 + unsigned long orig_rax; 89 + /* end of arguments */ 90 + /* cpu exception frame or undefined */ 91 + unsigned long rip; 92 + unsigned long cs; 93 + unsigned long eflags; 94 + unsigned long rsp; 95 + unsigned long ss; 96 + /* top of stack page */ 97 + }; 98 + 99 + #ifdef __KERNEL__ 100 + 101 + #define user_mode(regs) (!!((regs)->cs & 3)) 102 + #define user_mode_vm(regs) user_mode(regs) 103 + #define instruction_pointer(regs) ((regs)->rip) 104 + #define frame_pointer(regs) ((regs)->rbp) 105 + #define stack_pointer(regs) ((regs)->rsp) 106 + #define regs_return_value(regs) ((regs)->rax) 107 + 108 + extern unsigned long profile_pc(struct pt_regs *regs); 109 + void signal_fault(struct pt_regs *regs, void __user *frame, char *where); 110 + 111 + struct task_struct; 112 + 113 + extern unsigned long 114 + convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs); 115 + 116 + enum { 117 + EF_CF = 
0x00000001, 118 + EF_PF = 0x00000004, 119 + EF_AF = 0x00000010, 120 + EF_ZF = 0x00000040, 121 + EF_SF = 0x00000080, 122 + EF_TF = 0x00000100, 123 + EF_IE = 0x00000200, 124 + EF_DF = 0x00000400, 125 + EF_OF = 0x00000800, 126 + EF_IOPL = 0x00003000, 127 + EF_IOPL_RING0 = 0x00000000, 128 + EF_IOPL_RING1 = 0x00001000, 129 + EF_IOPL_RING2 = 0x00002000, 130 + EF_NT = 0x00004000, /* nested task */ 131 + EF_RF = 0x00010000, /* resume */ 132 + EF_VM = 0x00020000, /* virtual mode */ 133 + EF_AC = 0x00040000, /* alignment */ 134 + EF_VIF = 0x00080000, /* virtual interrupt */ 135 + EF_VIP = 0x00100000, /* virtual interrupt pending */ 136 + EF_ID = 0x00200000, /* id */ 137 + }; 138 + #endif /* __KERNEL__ */ 139 + #endif /* !__i386__ */ 140 + #endif /* !__ASSEMBLY__ */ 141 + 13 142 #endif
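
With both pt_regs layouts behind one header, code that inspects a trapped context can be written once against the shared accessors. A hedged sketch of such a consumer (record_sample() is hypothetical; user_mode_vm(), instruction_pointer() and profile_pc() come from the header above):

#include <asm/ptrace.h>

extern void record_sample(unsigned long pc, int from_user); /* hypothetical */

static void sample_regs(struct pt_regs *regs)
{
	if (user_mode_vm(regs))
		/* user mode (including vm86 on 32-bit): EIP/RIP is usable */
		record_sample(instruction_pointer(regs), 1);
	else
		/* kernel mode: let profile_pc() resolve the real PC */
		record_sample(profile_pc(regs), 0);
}
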
-65
include/asm-x86/ptrace_32.h
··· 1 - #ifndef _I386_PTRACE_H 2 - #define _I386_PTRACE_H 3 - 4 - #include <asm/ptrace-abi.h> 5 - 6 - /* this struct defines the way the registers are stored on the 7 - stack during a system call. */ 8 - 9 - struct pt_regs { 10 - long ebx; 11 - long ecx; 12 - long edx; 13 - long esi; 14 - long edi; 15 - long ebp; 16 - long eax; 17 - int xds; 18 - int xes; 19 - int xfs; 20 - /* int xgs; */ 21 - long orig_eax; 22 - long eip; 23 - int xcs; 24 - long eflags; 25 - long esp; 26 - int xss; 27 - }; 28 - 29 - #ifdef __KERNEL__ 30 - 31 - #include <asm/vm86.h> 32 - #include <asm/segment.h> 33 - 34 - struct task_struct; 35 - extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); 36 - 37 - /* 38 - * user_mode_vm(regs) determines whether a register set came from user mode. 39 - * This is true if V8086 mode was enabled OR if the register set was from 40 - * protected mode with RPL-3 CS value. This tricky test checks that with 41 - * one comparison. Many places in the kernel can bypass this full check 42 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used. 43 - */ 44 - static inline int user_mode(struct pt_regs *regs) 45 - { 46 - return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL; 47 - } 48 - static inline int user_mode_vm(struct pt_regs *regs) 49 - { 50 - return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL; 51 - } 52 - static inline int v8086_mode(struct pt_regs *regs) 53 - { 54 - return (regs->eflags & VM_MASK); 55 - } 56 - 57 - #define instruction_pointer(regs) ((regs)->eip) 58 - #define frame_pointer(regs) ((regs)->ebp) 59 - #define stack_pointer(regs) ((regs)->esp) 60 - #define regs_return_value(regs) ((regs)->eax) 61 - 62 - extern unsigned long profile_pc(struct pt_regs *regs); 63 - #endif /* __KERNEL__ */ 64 - 65 - #endif
-80
include/asm-x86/ptrace_64.h
··· 1 - #ifndef _X86_64_PTRACE_H 2 - #define _X86_64_PTRACE_H 3 - 4 - #include <linux/compiler.h> /* For __user */ 5 - #include <asm/ptrace-abi.h> 6 - 7 - #ifndef __ASSEMBLY__ 8 - 9 - struct pt_regs { 10 - unsigned long r15; 11 - unsigned long r14; 12 - unsigned long r13; 13 - unsigned long r12; 14 - unsigned long rbp; 15 - unsigned long rbx; 16 - /* arguments: non interrupts/non tracing syscalls only save upto here*/ 17 - unsigned long r11; 18 - unsigned long r10; 19 - unsigned long r9; 20 - unsigned long r8; 21 - unsigned long rax; 22 - unsigned long rcx; 23 - unsigned long rdx; 24 - unsigned long rsi; 25 - unsigned long rdi; 26 - unsigned long orig_rax; 27 - /* end of arguments */ 28 - /* cpu exception frame or undefined */ 29 - unsigned long rip; 30 - unsigned long cs; 31 - unsigned long eflags; 32 - unsigned long rsp; 33 - unsigned long ss; 34 - /* top of stack page */ 35 - }; 36 - 37 - #endif 38 - 39 - #if defined(__KERNEL__) && !defined(__ASSEMBLY__) 40 - #define user_mode(regs) (!!((regs)->cs & 3)) 41 - #define user_mode_vm(regs) user_mode(regs) 42 - #define instruction_pointer(regs) ((regs)->rip) 43 - #define frame_pointer(regs) ((regs)->rbp) 44 - #define stack_pointer(regs) ((regs)->rsp) 45 - #define regs_return_value(regs) ((regs)->rax) 46 - 47 - extern unsigned long profile_pc(struct pt_regs *regs); 48 - void signal_fault(struct pt_regs *regs, void __user *frame, char *where); 49 - 50 - struct task_struct; 51 - 52 - extern unsigned long 53 - convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs); 54 - 55 - enum { 56 - EF_CF = 0x00000001, 57 - EF_PF = 0x00000004, 58 - EF_AF = 0x00000010, 59 - EF_ZF = 0x00000040, 60 - EF_SF = 0x00000080, 61 - EF_TF = 0x00000100, 62 - EF_IE = 0x00000200, 63 - EF_DF = 0x00000400, 64 - EF_OF = 0x00000800, 65 - EF_IOPL = 0x00003000, 66 - EF_IOPL_RING0 = 0x00000000, 67 - EF_IOPL_RING1 = 0x00001000, 68 - EF_IOPL_RING2 = 0x00002000, 69 - EF_NT = 0x00004000, /* nested task */ 70 - EF_RF = 0x00010000, /* resume */ 71 - EF_VM = 0x00020000, /* virtual mode */ 72 - EF_AC = 0x00040000, /* alignment */ 73 - EF_VIF = 0x00080000, /* virtual interrupt */ 74 - EF_VIP = 0x00100000, /* virtual interrupt pending */ 75 - EF_ID = 0x00200000, /* id */ 76 - }; 77 - 78 - #endif 79 - 80 - #endif
+70 -3
include/asm-x86/required-features.h
··· 1 - #ifdef CONFIG_X86_32 2 - # include "required-features_32.h" 1 + #ifndef _ASM_REQUIRED_FEATURES_H 2 + #define _ASM_REQUIRED_FEATURES_H 1 3 + 4 + /* Define minimum CPUID feature set for kernel These bits are checked 5 + really early to actually display a visible error message before the 6 + kernel dies. Make sure to assign features to the proper mask! 7 + 8 + Some requirements that are not in CPUID yet are also in the 9 + CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too. 10 + 11 + The real information is in arch/x86/Kconfig.cpu, this just converts 12 + the CONFIGs into a bitmask */ 13 + 14 + #ifndef CONFIG_MATH_EMULATION 15 + # define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) 3 16 #else 4 - # include "required-features_64.h" 17 + # define NEED_FPU 0 18 + #endif 19 + 20 + #if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) 21 + # define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) 22 + # define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) 23 + #else 24 + # define NEED_PAE 0 25 + # define NEED_CX8 0 26 + #endif 27 + 28 + #if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64) 29 + # define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) 30 + #else 31 + # define NEED_CMOV 0 32 + #endif 33 + 34 + #ifdef CONFIG_X86_USE_3DNOW 35 + # define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) 36 + #else 37 + # define NEED_3DNOW 0 38 + #endif 39 + 40 + #ifdef CONFIG_X86_64 41 + #define NEED_PSE (1<<(X86_FEATURE_PSE & 31)) 42 + #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) 43 + #define NEED_PGE (1<<(X86_FEATURE_PGE & 31)) 44 + #define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) 45 + #define NEED_XMM (1<<(X86_FEATURE_XMM & 31)) 46 + #define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31)) 47 + #define NEED_LM (1<<(X86_FEATURE_LM & 31)) 48 + #else 49 + #define NEED_PSE 0 50 + #define NEED_MSR 0 51 + #define NEED_PGE 0 52 + #define NEED_FXSR 0 53 + #define NEED_XMM 0 54 + #define NEED_XMM2 0 55 + #define NEED_LM 0 56 + #endif 57 + 58 + #define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ 59 + NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ 60 + NEED_XMM|NEED_XMM2) 61 + #define SSE_MASK (NEED_XMM|NEED_XMM2) 62 + 63 + #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) 64 + 65 + #define REQUIRED_MASK2 0 66 + #define REQUIRED_MASK3 0 67 + #define REQUIRED_MASK4 0 68 + #define REQUIRED_MASK5 0 69 + #define REQUIRED_MASK6 0 70 + #define REQUIRED_MASK7 0 71 + 5 72 #endif
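
The masks exist to be ANDed against raw CPUID feature words very early in boot. A simplified sketch of that check, modeled on the boot-time CPU verification rather than copied from it:

#include <linux/types.h>
#include <asm/cpufeature.h>	/* supplies the X86_FEATURE_* bit numbers */

static int verify_required_features(const u32 feature_words[8])
{
	static const u32 required[8] = {
		REQUIRED_MASK0, REQUIRED_MASK1, REQUIRED_MASK2,
		REQUIRED_MASK3, REQUIRED_MASK4, REQUIRED_MASK5,
		REQUIRED_MASK6, REQUIRED_MASK7,
	};
	int i;

	for (i = 0; i < 8; i++)
		if (required[i] & ~feature_words[i])
			return -1;	/* a required feature is missing */
	return 0;
}
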
-55
include/asm-x86/required-features_32.h
··· 1 - #ifndef _ASM_REQUIRED_FEATURES_H 2 - #define _ASM_REQUIRED_FEATURES_H 1 3 - 4 - /* Define minimum CPUID feature set for kernel These bits are checked 5 - really early to actually display a visible error message before the 6 - kernel dies. Make sure to assign features to the proper mask! 7 - 8 - Some requirements that are not in CPUID yet are also in the 9 - CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too. 10 - 11 - The real information is in arch/i386/Kconfig.cpu, this just converts 12 - the CONFIGs into a bitmask */ 13 - 14 - #ifndef CONFIG_MATH_EMULATION 15 - # define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) 16 - #else 17 - # define NEED_FPU 0 18 - #endif 19 - 20 - #ifdef CONFIG_X86_PAE 21 - # define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) 22 - #else 23 - # define NEED_PAE 0 24 - #endif 25 - 26 - #ifdef CONFIG_X86_CMOV 27 - # define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) 28 - #else 29 - # define NEED_CMOV 0 30 - #endif 31 - 32 - #ifdef CONFIG_X86_PAE 33 - # define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) 34 - #else 35 - # define NEED_CX8 0 36 - #endif 37 - 38 - #define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8) 39 - 40 - #ifdef CONFIG_X86_USE_3DNOW 41 - # define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) 42 - #else 43 - # define NEED_3DNOW 0 44 - #endif 45 - 46 - #define REQUIRED_MASK1 (NEED_3DNOW) 47 - 48 - #define REQUIRED_MASK2 0 49 - #define REQUIRED_MASK3 0 50 - #define REQUIRED_MASK4 0 51 - #define REQUIRED_MASK5 0 52 - #define REQUIRED_MASK6 0 53 - #define REQUIRED_MASK7 0 54 - 55 - #endif
-46
include/asm-x86/required-features_64.h
··· 1 - #ifndef _ASM_REQUIRED_FEATURES_H 2 - #define _ASM_REQUIRED_FEATURES_H 1 3 - 4 - /* Define minimum CPUID feature set for kernel These bits are checked 5 - really early to actually display a visible error message before the 6 - kernel dies. Make sure to assign features to the proper mask! 7 - 8 - The real information is in arch/x86_64/Kconfig.cpu, this just converts 9 - the CONFIGs into a bitmask */ 10 - 11 - /* x86-64 baseline features */ 12 - #define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) 13 - #define NEED_PSE (1<<(X86_FEATURE_PSE & 31)) 14 - #define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) 15 - #define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) 16 - #define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) 17 - #define NEED_PGE (1<<(X86_FEATURE_PGE & 31)) 18 - #define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) 19 - #define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) 20 - #define NEED_XMM (1<<(X86_FEATURE_XMM & 31)) 21 - #define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31)) 22 - 23 - #define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ 24 - NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ 25 - NEED_XMM|NEED_XMM2) 26 - #define SSE_MASK (NEED_XMM|NEED_XMM2) 27 - 28 - /* x86-64 baseline features */ 29 - #define NEED_LM (1<<(X86_FEATURE_LM & 31)) 30 - 31 - #ifdef CONFIG_X86_USE_3DNOW 32 - # define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) 33 - #else 34 - # define NEED_3DNOW 0 35 - #endif 36 - 37 - #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) 38 - 39 - #define REQUIRED_MASK2 0 40 - #define REQUIRED_MASK3 0 41 - #define REQUIRED_MASK4 0 42 - #define REQUIRED_MASK5 0 43 - #define REQUIRED_MASK6 0 44 - #define REQUIRED_MASK7 0 45 - 46 - #endif
+61 -11
include/asm-x86/setup.h
··· 1 + #ifndef _ASM_X86_SETUP_H 2 + #define _ASM_X86_SETUP_H 3 + 4 + #define COMMAND_LINE_SIZE 2048 5 + 1 6 #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "setup_32.h" 4 - # else 5 - # include "setup_64.h" 6 - # endif 7 - #else 8 - # ifdef __i386__ 9 - # include "setup_32.h" 10 - # else 11 - # include "setup_64.h" 12 - # endif 7 + 8 + #ifdef __i386__ 9 + 10 + #include <linux/pfn.h> 11 + /* 12 + * Reserved space for vmalloc and iomap - defined in asm/page.h 13 + */ 14 + #define MAXMEM_PFN PFN_DOWN(MAXMEM) 15 + #define MAX_NONPAE_PFN (1 << 20) 16 + 17 + #endif /* __i386__ */ 18 + 19 + #define PARAM_SIZE 4096 /* sizeof(struct boot_params) */ 20 + 21 + #define OLD_CL_MAGIC 0xA33F 22 + #define OLD_CL_ADDRESS 0x020 /* Relative to real mode data */ 23 + #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ 24 + 25 + #ifndef __ASSEMBLY__ 26 + #include <asm/bootparam.h> 27 + 28 + #ifndef _SETUP 29 + 30 + /* 31 + * This is set up by the setup-routine at boot-time 32 + */ 33 + extern struct boot_params boot_params; 34 + 35 + #ifdef __i386__ 36 + /* 37 + * Do NOT EVER look at the BIOS memory size location. 38 + * It does not work on many machines. 39 + */ 40 + #define LOWMEMSIZE() (0x9f000) 41 + 42 + struct e820entry; 43 + 44 + char * __init machine_specific_memory_setup(void); 45 + char *memory_setup(void); 46 + 47 + int __init copy_e820_map(struct e820entry * biosmap, int nr_map); 48 + int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); 49 + void __init add_memory_region(unsigned long long start, 50 + unsigned long long size, int type); 51 + 52 + extern unsigned long init_pg_tables_end; 53 + 54 + #ifndef CONFIG_PARAVIRT 55 + #define paravirt_post_allocator_init() do {} while (0) 13 56 #endif 57 + 58 + #endif /* __i386__ */ 59 + #endif /* _SETUP */ 60 + #endif /* __ASSEMBLY__ */ 61 + #endif /* __KERNEL__ */ 62 + 63 + #endif /* _ASM_X86_SETUP_H */
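
OLD_CL_MAGIC, OLD_CL_ADDRESS and NEW_CL_POINTER now have a single home. An illustrative sketch of how the old-style command-line protocol is probed with them (rm_data and the helper name are invented here; the real logic lives in the boot code, and the layout matches the old 0x90020/0x90022 constants removed below):

#include <string.h>

static unsigned short old_cmdline_offset(const unsigned char *rm_data)
{
	unsigned short magic, offset;

	memcpy(&magic, rm_data + OLD_CL_ADDRESS, sizeof(magic));
	if (magic != OLD_CL_MAGIC)
		return 0;	/* no old-style command line present */

	/* the 16-bit offset word sits directly after the magic */
	memcpy(&offset, rm_data + OLD_CL_ADDRESS + 2, sizeof(offset));
	return offset;	/* relative to the real-mode data segment */
}
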
-63
include/asm-x86/setup_32.h
··· 1 - /* 2 - * Just a place holder. We don't want to have to test x86 before 3 - * we include stuff 4 - */ 5 - 6 - #ifndef _i386_SETUP_H 7 - #define _i386_SETUP_H 8 - 9 - #define COMMAND_LINE_SIZE 2048 10 - 11 - #ifdef __KERNEL__ 12 - #include <linux/pfn.h> 13 - 14 - /* 15 - * Reserved space for vmalloc and iomap - defined in asm/page.h 16 - */ 17 - #define MAXMEM_PFN PFN_DOWN(MAXMEM) 18 - #define MAX_NONPAE_PFN (1 << 20) 19 - 20 - #define PARAM_SIZE 4096 21 - 22 - #define OLD_CL_MAGIC_ADDR 0x90020 23 - #define OLD_CL_MAGIC 0xA33F 24 - #define OLD_CL_BASE_ADDR 0x90000 25 - #define OLD_CL_OFFSET 0x90022 26 - #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ 27 - 28 - #ifndef __ASSEMBLY__ 29 - 30 - #include <asm/bootparam.h> 31 - 32 - /* 33 - * This is set up by the setup-routine at boot-time 34 - */ 35 - extern struct boot_params boot_params; 36 - 37 - /* 38 - * Do NOT EVER look at the BIOS memory size location. 39 - * It does not work on many machines. 40 - */ 41 - #define LOWMEMSIZE() (0x9f000) 42 - 43 - struct e820entry; 44 - 45 - char * __init machine_specific_memory_setup(void); 46 - char *memory_setup(void); 47 - 48 - int __init copy_e820_map(struct e820entry * biosmap, int nr_map); 49 - int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); 50 - void __init add_memory_region(unsigned long long start, 51 - unsigned long long size, int type); 52 - 53 - extern unsigned long init_pg_tables_end; 54 - 55 - #ifndef CONFIG_PARAVIRT 56 - #define paravirt_post_allocator_init() do {} while (0) 57 - #endif 58 - 59 - #endif /* __ASSEMBLY__ */ 60 - 61 - #endif /* __KERNEL__ */ 62 - 63 - #endif /* _i386_SETUP_H */
-19
include/asm-x86/setup_64.h
··· 1 - #ifndef _x8664_SETUP_H 2 - #define _x8664_SETUP_H 3 - 4 - #define COMMAND_LINE_SIZE 2048 5 - 6 - #ifdef __KERNEL__ 7 - 8 - #ifndef __ASSEMBLY__ 9 - #include <asm/bootparam.h> 10 - 11 - /* 12 - * This is set up by the setup-routine at boot-time 13 - */ 14 - extern struct boot_params boot_params; 15 - 16 - #endif /* not __ASSEMBLY__ */ 17 - #endif /* __KERNEL__ */ 18 - 19 - #endif
+50 -12
include/asm-x86/shmbuf.h
··· 1 - #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "shmbuf_32.h" 4 - # else 5 - # include "shmbuf_64.h" 6 - # endif 7 - #else 8 - # ifdef __i386__ 9 - # include "shmbuf_32.h" 10 - # else 11 - # include "shmbuf_64.h" 12 - # endif 1 + #ifndef _ASM_X86_SHMBUF_H 2 + #define _ASM_X86_SHMBUF_H 3 + 4 + /* 5 + * The shmid64_ds structure for x86 architecture. 6 + * Note extra padding because this structure is passed back and forth 7 + * between kernel and user space. 8 + * 9 + * Pad space on 32 bit is left for: 10 + * - 64-bit time_t to solve y2038 problem 11 + * - 2 miscellaneous 32-bit values 12 + * 13 + * Pad space on 64 bit is left for: 14 + * - 2 miscellaneous 64-bit values 15 + */ 16 + 17 + struct shmid64_ds { 18 + struct ipc64_perm shm_perm; /* operation perms */ 19 + size_t shm_segsz; /* size of segment (bytes) */ 20 + __kernel_time_t shm_atime; /* last attach time */ 21 + #ifdef __i386__ 22 + unsigned long __unused1; 13 23 #endif 24 + __kernel_time_t shm_dtime; /* last detach time */ 25 + #ifdef __i386__ 26 + unsigned long __unused2; 27 + #endif 28 + __kernel_time_t shm_ctime; /* last change time */ 29 + #ifdef __i386__ 30 + unsigned long __unused3; 31 + #endif 32 + __kernel_pid_t shm_cpid; /* pid of creator */ 33 + __kernel_pid_t shm_lpid; /* pid of last operator */ 34 + unsigned long shm_nattch; /* no. of current attaches */ 35 + unsigned long __unused4; 36 + unsigned long __unused5; 37 + }; 38 + 39 + struct shminfo64 { 40 + unsigned long shmmax; 41 + unsigned long shmmin; 42 + unsigned long shmmni; 43 + unsigned long shmseg; 44 + unsigned long shmall; 45 + unsigned long __unused1; 46 + unsigned long __unused2; 47 + unsigned long __unused3; 48 + unsigned long __unused4; 49 + }; 50 + 51 + #endif /* _ASM_X86_SHMBUF_H */
-42
include/asm-x86/shmbuf_32.h
··· 1 - #ifndef _I386_SHMBUF_H 2 - #define _I386_SHMBUF_H 3 - 4 - /* 5 - * The shmid64_ds structure for i386 architecture. 6 - * Note extra padding because this structure is passed back and forth 7 - * between kernel and user space. 8 - * 9 - * Pad space is left for: 10 - * - 64-bit time_t to solve y2038 problem 11 - * - 2 miscellaneous 32-bit values 12 - */ 13 - 14 - struct shmid64_ds { 15 - struct ipc64_perm shm_perm; /* operation perms */ 16 - size_t shm_segsz; /* size of segment (bytes) */ 17 - __kernel_time_t shm_atime; /* last attach time */ 18 - unsigned long __unused1; 19 - __kernel_time_t shm_dtime; /* last detach time */ 20 - unsigned long __unused2; 21 - __kernel_time_t shm_ctime; /* last change time */ 22 - unsigned long __unused3; 23 - __kernel_pid_t shm_cpid; /* pid of creator */ 24 - __kernel_pid_t shm_lpid; /* pid of last operator */ 25 - unsigned long shm_nattch; /* no. of current attaches */ 26 - unsigned long __unused4; 27 - unsigned long __unused5; 28 - }; 29 - 30 - struct shminfo64 { 31 - unsigned long shmmax; 32 - unsigned long shmmin; 33 - unsigned long shmmni; 34 - unsigned long shmseg; 35 - unsigned long shmall; 36 - unsigned long __unused1; 37 - unsigned long __unused2; 38 - unsigned long __unused3; 39 - unsigned long __unused4; 40 - }; 41 - 42 - #endif /* _I386_SHMBUF_H */
-38
include/asm-x86/shmbuf_64.h
··· 1 - #ifndef _X8664_SHMBUF_H 2 - #define _X8664_SHMBUF_H 3 - 4 - /* 5 - * The shmid64_ds structure for x8664 architecture. 6 - * Note extra padding because this structure is passed back and forth 7 - * between kernel and user space. 8 - * 9 - * Pad space is left for: 10 - * - 2 miscellaneous 64-bit values 11 - */ 12 - 13 - struct shmid64_ds { 14 - struct ipc64_perm shm_perm; /* operation perms */ 15 - size_t shm_segsz; /* size of segment (bytes) */ 16 - __kernel_time_t shm_atime; /* last attach time */ 17 - __kernel_time_t shm_dtime; /* last detach time */ 18 - __kernel_time_t shm_ctime; /* last change time */ 19 - __kernel_pid_t shm_cpid; /* pid of creator */ 20 - __kernel_pid_t shm_lpid; /* pid of last operator */ 21 - unsigned long shm_nattch; /* no. of current attaches */ 22 - unsigned long __unused4; 23 - unsigned long __unused5; 24 - }; 25 - 26 - struct shminfo64 { 27 - unsigned long shmmax; 28 - unsigned long shmmin; 29 - unsigned long shmmni; 30 - unsigned long shmseg; 31 - unsigned long shmall; 32 - unsigned long __unused1; 33 - unsigned long __unused2; 34 - unsigned long __unused3; 35 - unsigned long __unused4; 36 - }; 37 - 38 - #endif
+137 -12
include/asm-x86/sigcontext.h
··· 1 - #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "sigcontext_32.h" 4 - # else 5 - # include "sigcontext_64.h" 6 - # endif 7 - #else 8 - # ifdef __i386__ 9 - # include "sigcontext_32.h" 10 - # else 11 - # include "sigcontext_64.h" 12 - # endif 1 + #ifndef _ASM_X86_SIGCONTEXT_H 2 + #define _ASM_X86_SIGCONTEXT_H 3 + 4 + #include <linux/compiler.h> 5 + #include <asm/types.h> 6 + 7 + #ifdef __i386__ 8 + /* 9 + * As documented in the iBCS2 standard.. 10 + * 11 + * The first part of "struct _fpstate" is just the normal i387 12 + * hardware setup, the extra "status" word is used to save the 13 + * coprocessor status word before entering the handler. 14 + * 15 + * Pentium III FXSR, SSE support 16 + * Gareth Hughes <gareth@valinux.com>, May 2000 17 + * 18 + * The FPU state data structure has had to grow to accommodate the 19 + * extended FPU state required by the Streaming SIMD Extensions. 20 + * There is no documented standard to accomplish this at the moment. 21 + */ 22 + struct _fpreg { 23 + unsigned short significand[4]; 24 + unsigned short exponent; 25 + }; 26 + 27 + struct _fpxreg { 28 + unsigned short significand[4]; 29 + unsigned short exponent; 30 + unsigned short padding[3]; 31 + }; 32 + 33 + struct _xmmreg { 34 + unsigned long element[4]; 35 + }; 36 + 37 + struct _fpstate { 38 + /* Regular FPU environment */ 39 + unsigned long cw; 40 + unsigned long sw; 41 + unsigned long tag; 42 + unsigned long ipoff; 43 + unsigned long cssel; 44 + unsigned long dataoff; 45 + unsigned long datasel; 46 + struct _fpreg _st[8]; 47 + unsigned short status; 48 + unsigned short magic; /* 0xffff = regular FPU data only */ 49 + 50 + /* FXSR FPU environment */ 51 + unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ 52 + unsigned long mxcsr; 53 + unsigned long reserved; 54 + struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 55 + struct _xmmreg _xmm[8]; 56 + unsigned long padding[56]; 57 + }; 58 + 59 + #define X86_FXSR_MAGIC 0x0000 60 + 61 + struct sigcontext { 62 + unsigned short gs, __gsh; 63 + unsigned short fs, __fsh; 64 + unsigned short es, __esh; 65 + unsigned short ds, __dsh; 66 + unsigned long edi; 67 + unsigned long esi; 68 + unsigned long ebp; 69 + unsigned long esp; 70 + unsigned long ebx; 71 + unsigned long edx; 72 + unsigned long ecx; 73 + unsigned long eax; 74 + unsigned long trapno; 75 + unsigned long err; 76 + unsigned long eip; 77 + unsigned short cs, __csh; 78 + unsigned long eflags; 79 + unsigned long esp_at_signal; 80 + unsigned short ss, __ssh; 81 + struct _fpstate __user * fpstate; 82 + unsigned long oldmask; 83 + unsigned long cr2; 84 + }; 85 + 86 + #else /* __i386__ */ 87 + 88 + /* FXSAVE frame */ 89 + /* Note: reserved1/2 may someday contain valuable data. Always save/restore 90 + them when you change signal frames. 
*/ 91 + struct _fpstate { 92 + __u16 cwd; 93 + __u16 swd; 94 + __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ 95 + __u16 fop; 96 + __u64 rip; 97 + __u64 rdp; 98 + __u32 mxcsr; 99 + __u32 mxcsr_mask; 100 + __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ 101 + __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ 102 + __u32 reserved2[24]; 103 + }; 104 + 105 + struct sigcontext { 106 + unsigned long r8; 107 + unsigned long r9; 108 + unsigned long r10; 109 + unsigned long r11; 110 + unsigned long r12; 111 + unsigned long r13; 112 + unsigned long r14; 113 + unsigned long r15; 114 + unsigned long rdi; 115 + unsigned long rsi; 116 + unsigned long rbp; 117 + unsigned long rbx; 118 + unsigned long rdx; 119 + unsigned long rax; 120 + unsigned long rcx; 121 + unsigned long rsp; 122 + unsigned long rip; 123 + unsigned long eflags; /* RFLAGS */ 124 + unsigned short cs; 125 + unsigned short gs; 126 + unsigned short fs; 127 + unsigned short __pad0; 128 + unsigned long err; 129 + unsigned long trapno; 130 + unsigned long oldmask; 131 + unsigned long cr2; 132 + struct _fpstate __user *fpstate; /* zero when no FPU context */ 133 + unsigned long reserved1[8]; 134 + }; 135 + 136 + #endif /* !__i386__ */ 137 + 13 138 #endif
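
On 32-bit, the magic word at the end of the classic FPU area tells a handler whether the FXSR extension of the frame is populated, as the comments above spell out. A small sketch of that test (obtaining the _fpstate pointer inside a signal handler is libc-specific and elided here):

/* nonzero when the FXSR part of the 32-bit frame carries live data */
static int frame_has_fxsr(const struct _fpstate *fp)
{
	/* magic == 0xffff would mean regular FPU data only */
	return fp->magic == X86_FXSR_MAGIC;
}
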
-85
include/asm-x86/sigcontext_32.h
··· 1 - #ifndef _ASMi386_SIGCONTEXT_H 2 - #define _ASMi386_SIGCONTEXT_H 3 - 4 - #include <linux/compiler.h> 5 - 6 - /* 7 - * As documented in the iBCS2 standard.. 8 - * 9 - * The first part of "struct _fpstate" is just the normal i387 10 - * hardware setup, the extra "status" word is used to save the 11 - * coprocessor status word before entering the handler. 12 - * 13 - * Pentium III FXSR, SSE support 14 - * Gareth Hughes <gareth@valinux.com>, May 2000 15 - * 16 - * The FPU state data structure has had to grow to accommodate the 17 - * extended FPU state required by the Streaming SIMD Extensions. 18 - * There is no documented standard to accomplish this at the moment. 19 - */ 20 - struct _fpreg { 21 - unsigned short significand[4]; 22 - unsigned short exponent; 23 - }; 24 - 25 - struct _fpxreg { 26 - unsigned short significand[4]; 27 - unsigned short exponent; 28 - unsigned short padding[3]; 29 - }; 30 - 31 - struct _xmmreg { 32 - unsigned long element[4]; 33 - }; 34 - 35 - struct _fpstate { 36 - /* Regular FPU environment */ 37 - unsigned long cw; 38 - unsigned long sw; 39 - unsigned long tag; 40 - unsigned long ipoff; 41 - unsigned long cssel; 42 - unsigned long dataoff; 43 - unsigned long datasel; 44 - struct _fpreg _st[8]; 45 - unsigned short status; 46 - unsigned short magic; /* 0xffff = regular FPU data only */ 47 - 48 - /* FXSR FPU environment */ 49 - unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ 50 - unsigned long mxcsr; 51 - unsigned long reserved; 52 - struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ 53 - struct _xmmreg _xmm[8]; 54 - unsigned long padding[56]; 55 - }; 56 - 57 - #define X86_FXSR_MAGIC 0x0000 58 - 59 - struct sigcontext { 60 - unsigned short gs, __gsh; 61 - unsigned short fs, __fsh; 62 - unsigned short es, __esh; 63 - unsigned short ds, __dsh; 64 - unsigned long edi; 65 - unsigned long esi; 66 - unsigned long ebp; 67 - unsigned long esp; 68 - unsigned long ebx; 69 - unsigned long edx; 70 - unsigned long ecx; 71 - unsigned long eax; 72 - unsigned long trapno; 73 - unsigned long err; 74 - unsigned long eip; 75 - unsigned short cs, __csh; 76 - unsigned long eflags; 77 - unsigned long esp_at_signal; 78 - unsigned short ss, __ssh; 79 - struct _fpstate __user * fpstate; 80 - unsigned long oldmask; 81 - unsigned long cr2; 82 - }; 83 - 84 - 85 - #endif
-55
include/asm-x86/sigcontext_64.h
··· 1 - #ifndef _ASM_X86_64_SIGCONTEXT_H 2 - #define _ASM_X86_64_SIGCONTEXT_H 3 - 4 - #include <asm/types.h> 5 - #include <linux/compiler.h> 6 - 7 - /* FXSAVE frame */ 8 - /* Note: reserved1/2 may someday contain valuable data. Always save/restore 9 - them when you change signal frames. */ 10 - struct _fpstate { 11 - __u16 cwd; 12 - __u16 swd; 13 - __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ 14 - __u16 fop; 15 - __u64 rip; 16 - __u64 rdp; 17 - __u32 mxcsr; 18 - __u32 mxcsr_mask; 19 - __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ 20 - __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ 21 - __u32 reserved2[24]; 22 - }; 23 - 24 - struct sigcontext { 25 - unsigned long r8; 26 - unsigned long r9; 27 - unsigned long r10; 28 - unsigned long r11; 29 - unsigned long r12; 30 - unsigned long r13; 31 - unsigned long r14; 32 - unsigned long r15; 33 - unsigned long rdi; 34 - unsigned long rsi; 35 - unsigned long rbp; 36 - unsigned long rbx; 37 - unsigned long rdx; 38 - unsigned long rax; 39 - unsigned long rcx; 40 - unsigned long rsp; 41 - unsigned long rip; 42 - unsigned long eflags; /* RFLAGS */ 43 - unsigned short cs; 44 - unsigned short gs; 45 - unsigned short fs; 46 - unsigned short __pad0; 47 - unsigned long err; 48 - unsigned long trapno; 49 - unsigned long oldmask; 50 - unsigned long cr2; 51 - struct _fpstate __user *fpstate; /* zero when no FPU context */ 52 - unsigned long reserved1[8]; 53 - }; 54 - 55 - #endif
+263 -10
include/asm-x86/signal.h
··· 1 + #ifndef _ASM_X86_SIGNAL_H 2 + #define _ASM_X86_SIGNAL_H 3 + 4 + #ifndef __ASSEMBLY__ 5 + #include <linux/types.h> 6 + #include <linux/time.h> 7 + #include <linux/compiler.h> 8 + 9 + /* Avoid too many header ordering problems. */ 10 + struct siginfo; 11 + 1 12 #ifdef __KERNEL__ 2 - # ifdef CONFIG_X86_32 3 - # include "signal_32.h" 4 - # else 5 - # include "signal_64.h" 6 - # endif 13 + #include <linux/linkage.h> 14 + 15 + /* Most things should be clean enough to redefine this at will, if care 16 + is taken to make libc match. */ 17 + 18 + #define _NSIG 64 19 + 20 + #ifdef __i386__ 21 + # define _NSIG_BPW 32 7 22 #else 8 - # ifdef __i386__ 9 - # include "signal_32.h" 10 - # else 11 - # include "signal_64.h" 12 - # endif 23 + # define _NSIG_BPW 64 24 + #endif 25 + 26 + #define _NSIG_WORDS (_NSIG / _NSIG_BPW) 27 + 28 + typedef unsigned long old_sigset_t; /* at least 32 bits */ 29 + 30 + typedef struct { 31 + unsigned long sig[_NSIG_WORDS]; 32 + } sigset_t; 33 + 34 + #else 35 + /* Here we must cater to libcs that poke about in kernel headers. */ 36 + 37 + #define NSIG 32 38 + typedef unsigned long sigset_t; 39 + 40 + #endif /* __KERNEL__ */ 41 + #endif /* __ASSEMBLY__ */ 42 + 43 + #define SIGHUP 1 44 + #define SIGINT 2 45 + #define SIGQUIT 3 46 + #define SIGILL 4 47 + #define SIGTRAP 5 48 + #define SIGABRT 6 49 + #define SIGIOT 6 50 + #define SIGBUS 7 51 + #define SIGFPE 8 52 + #define SIGKILL 9 53 + #define SIGUSR1 10 54 + #define SIGSEGV 11 55 + #define SIGUSR2 12 56 + #define SIGPIPE 13 57 + #define SIGALRM 14 58 + #define SIGTERM 15 59 + #define SIGSTKFLT 16 60 + #define SIGCHLD 17 61 + #define SIGCONT 18 62 + #define SIGSTOP 19 63 + #define SIGTSTP 20 64 + #define SIGTTIN 21 65 + #define SIGTTOU 22 66 + #define SIGURG 23 67 + #define SIGXCPU 24 68 + #define SIGXFSZ 25 69 + #define SIGVTALRM 26 70 + #define SIGPROF 27 71 + #define SIGWINCH 28 72 + #define SIGIO 29 73 + #define SIGPOLL SIGIO 74 + /* 75 + #define SIGLOST 29 76 + */ 77 + #define SIGPWR 30 78 + #define SIGSYS 31 79 + #define SIGUNUSED 31 80 + 81 + /* These should not be considered constants from userland. */ 82 + #define SIGRTMIN 32 83 + #define SIGRTMAX _NSIG 84 + 85 + /* 86 + * SA_FLAGS values: 87 + * 88 + * SA_ONSTACK indicates that a registered stack_t will be used. 89 + * SA_RESTART flag to get restarting signals (which were the default long ago) 90 + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. 91 + * SA_RESETHAND clears the handler when the signal is delivered. 92 + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. 93 + * SA_NODEFER prevents the current signal from being masked in the handler. 94 + * 95 + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single 96 + * Unix names RESETHAND and NODEFER respectively. 
97 + */ 98 + #define SA_NOCLDSTOP 0x00000001u 99 + #define SA_NOCLDWAIT 0x00000002u 100 + #define SA_SIGINFO 0x00000004u 101 + #define SA_ONSTACK 0x08000000u 102 + #define SA_RESTART 0x10000000u 103 + #define SA_NODEFER 0x40000000u 104 + #define SA_RESETHAND 0x80000000u 105 + 106 + #define SA_NOMASK SA_NODEFER 107 + #define SA_ONESHOT SA_RESETHAND 108 + 109 + #define SA_RESTORER 0x04000000 110 + 111 + /* 112 + * sigaltstack controls 113 + */ 114 + #define SS_ONSTACK 1 115 + #define SS_DISABLE 2 116 + 117 + #define MINSIGSTKSZ 2048 118 + #define SIGSTKSZ 8192 119 + 120 + #include <asm-generic/signal.h> 121 + 122 + #ifndef __ASSEMBLY__ 123 + 124 + #ifdef __i386__ 125 + # ifdef __KERNEL__ 126 + struct old_sigaction { 127 + __sighandler_t sa_handler; 128 + old_sigset_t sa_mask; 129 + unsigned long sa_flags; 130 + __sigrestore_t sa_restorer; 131 + }; 132 + 133 + struct sigaction { 134 + __sighandler_t sa_handler; 135 + unsigned long sa_flags; 136 + __sigrestore_t sa_restorer; 137 + sigset_t sa_mask; /* mask last for extensibility */ 138 + }; 139 + 140 + struct k_sigaction { 141 + struct sigaction sa; 142 + }; 143 + # else /* __KERNEL__ */ 144 + /* Here we must cater to libcs that poke about in kernel headers. */ 145 + 146 + struct sigaction { 147 + union { 148 + __sighandler_t _sa_handler; 149 + void (*_sa_sigaction)(int, struct siginfo *, void *); 150 + } _u; 151 + sigset_t sa_mask; 152 + unsigned long sa_flags; 153 + void (*sa_restorer)(void); 154 + }; 155 + 156 + #define sa_handler _u._sa_handler 157 + #define sa_sigaction _u._sa_sigaction 158 + 159 + # endif /* ! __KERNEL__ */ 160 + #else /* __i386__ */ 161 + 162 + struct sigaction { 163 + __sighandler_t sa_handler; 164 + unsigned long sa_flags; 165 + __sigrestore_t sa_restorer; 166 + sigset_t sa_mask; /* mask last for extensibility */ 167 + }; 168 + 169 + struct k_sigaction { 170 + struct sigaction sa; 171 + }; 172 + 173 + #endif /* !__i386__ */ 174 + 175 + typedef struct sigaltstack { 176 + void __user *ss_sp; 177 + int ss_flags; 178 + size_t ss_size; 179 + } stack_t; 180 + 181 + #ifdef __KERNEL__ 182 + #include <asm/sigcontext.h> 183 + 184 + #ifdef __i386__ 185 + 186 + #define __HAVE_ARCH_SIG_BITOPS 187 + 188 + #define sigaddset(set,sig) \ 189 + (__builtin_constant_p(sig) ? \ 190 + __const_sigaddset((set),(sig)) : \ 191 + __gen_sigaddset((set),(sig))) 192 + 193 + static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) 194 + { 195 + __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); 196 + } 197 + 198 + static __inline__ void __const_sigaddset(sigset_t *set, int _sig) 199 + { 200 + unsigned long sig = _sig - 1; 201 + set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); 202 + } 203 + 204 + #define sigdelset(set,sig) \ 205 + (__builtin_constant_p(sig) ? 
\ 206 + __const_sigdelset((set),(sig)) : \ 207 + __gen_sigdelset((set),(sig))) 208 + 209 + 210 + static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) 211 + { 212 + __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); 213 + } 214 + 215 + static __inline__ void __const_sigdelset(sigset_t *set, int _sig) 216 + { 217 + unsigned long sig = _sig - 1; 218 + set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); 219 + } 220 + 221 + static __inline__ int __const_sigismember(sigset_t *set, int _sig) 222 + { 223 + unsigned long sig = _sig - 1; 224 + return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); 225 + } 226 + 227 + static __inline__ int __gen_sigismember(sigset_t *set, int _sig) 228 + { 229 + int ret; 230 + __asm__("btl %2,%1\n\tsbbl %0,%0" 231 + : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); 232 + return ret; 233 + } 234 + 235 + #define sigismember(set,sig) \ 236 + (__builtin_constant_p(sig) ? \ 237 + __const_sigismember((set),(sig)) : \ 238 + __gen_sigismember((set),(sig))) 239 + 240 + static __inline__ int sigfindinword(unsigned long word) 241 + { 242 + __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); 243 + return word; 244 + } 245 + 246 + struct pt_regs; 247 + 248 + #define ptrace_signal_deliver(regs, cookie) \ 249 + do { \ 250 + if (current->ptrace & PT_DTRACE) { \ 251 + current->ptrace &= ~PT_DTRACE; \ 252 + (regs)->eflags &= ~TF_MASK; \ 253 + } \ 254 + } while (0) 255 + 256 + #else /* __i386__ */ 257 + 258 + #undef __HAVE_ARCH_SIG_BITOPS 259 + 260 + #define ptrace_signal_deliver(regs, cookie) do { } while (0) 261 + 262 + #endif /* !__i386__ */ 263 + #endif /* __KERNEL__ */ 264 + #endif /* __ASSEMBLY__ */ 265 + 13 266 #endif
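
For constant signal numbers the i386 sigaddset() collapses to pure bit arithmetic: with _NSIG_BPW == 32, SIGUSR1 (10) lands on bit 9 of word 0. A stand-alone demonstration of that math (names invented, userspace only):

#include <stdio.h>

#define DEMO_NSIG_BPW 32	/* the 32-bit value of _NSIG_BPW */

static void demo_sigaddset(unsigned long *words, int _sig)
{
	unsigned long sig = _sig - 1;

	/* same arithmetic as __const_sigaddset() above */
	words[sig / DEMO_NSIG_BPW] |= 1UL << (sig % DEMO_NSIG_BPW);
}

int main(void)
{
	unsigned long set[2] = { 0, 0 };

	demo_sigaddset(set, 10);		/* SIGUSR1 */
	printf("word0 = %#lx\n", set[0]);	/* prints 0x200: bit 9 set */
	return 0;
}
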
-232
include/asm-x86/signal_32.h
··· 1 - #ifndef _ASMi386_SIGNAL_H 2 - #define _ASMi386_SIGNAL_H 3 - 4 - #include <linux/types.h> 5 - #include <linux/time.h> 6 - #include <linux/compiler.h> 7 - 8 - /* Avoid too many header ordering problems. */ 9 - struct siginfo; 10 - 11 - #ifdef __KERNEL__ 12 - 13 - #include <linux/linkage.h> 14 - 15 - /* Most things should be clean enough to redefine this at will, if care 16 - is taken to make libc match. */ 17 - 18 - #define _NSIG 64 19 - #define _NSIG_BPW 32 20 - #define _NSIG_WORDS (_NSIG / _NSIG_BPW) 21 - 22 - typedef unsigned long old_sigset_t; /* at least 32 bits */ 23 - 24 - typedef struct { 25 - unsigned long sig[_NSIG_WORDS]; 26 - } sigset_t; 27 - 28 - #else 29 - /* Here we must cater to libcs that poke about in kernel headers. */ 30 - 31 - #define NSIG 32 32 - typedef unsigned long sigset_t; 33 - 34 - #endif /* __KERNEL__ */ 35 - 36 - #define SIGHUP 1 37 - #define SIGINT 2 38 - #define SIGQUIT 3 39 - #define SIGILL 4 40 - #define SIGTRAP 5 41 - #define SIGABRT 6 42 - #define SIGIOT 6 43 - #define SIGBUS 7 44 - #define SIGFPE 8 45 - #define SIGKILL 9 46 - #define SIGUSR1 10 47 - #define SIGSEGV 11 48 - #define SIGUSR2 12 49 - #define SIGPIPE 13 50 - #define SIGALRM 14 51 - #define SIGTERM 15 52 - #define SIGSTKFLT 16 53 - #define SIGCHLD 17 54 - #define SIGCONT 18 55 - #define SIGSTOP 19 56 - #define SIGTSTP 20 57 - #define SIGTTIN 21 58 - #define SIGTTOU 22 59 - #define SIGURG 23 60 - #define SIGXCPU 24 61 - #define SIGXFSZ 25 62 - #define SIGVTALRM 26 63 - #define SIGPROF 27 64 - #define SIGWINCH 28 65 - #define SIGIO 29 66 - #define SIGPOLL SIGIO 67 - /* 68 - #define SIGLOST 29 69 - */ 70 - #define SIGPWR 30 71 - #define SIGSYS 31 72 - #define SIGUNUSED 31 73 - 74 - /* These should not be considered constants from userland. */ 75 - #define SIGRTMIN 32 76 - #define SIGRTMAX _NSIG 77 - 78 - /* 79 - * SA_FLAGS values: 80 - * 81 - * SA_ONSTACK indicates that a registered stack_t will be used. 82 - * SA_RESTART flag to get restarting signals (which were the default long ago) 83 - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. 84 - * SA_RESETHAND clears the handler when the signal is delivered. 85 - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. 86 - * SA_NODEFER prevents the current signal from being masked in the handler. 87 - * 88 - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single 89 - * Unix names RESETHAND and NODEFER respectively. 
90 - */ 91 - #define SA_NOCLDSTOP 0x00000001u 92 - #define SA_NOCLDWAIT 0x00000002u 93 - #define SA_SIGINFO 0x00000004u 94 - #define SA_ONSTACK 0x08000000u 95 - #define SA_RESTART 0x10000000u 96 - #define SA_NODEFER 0x40000000u 97 - #define SA_RESETHAND 0x80000000u 98 - 99 - #define SA_NOMASK SA_NODEFER 100 - #define SA_ONESHOT SA_RESETHAND 101 - 102 - #define SA_RESTORER 0x04000000 103 - 104 - /* 105 - * sigaltstack controls 106 - */ 107 - #define SS_ONSTACK 1 108 - #define SS_DISABLE 2 109 - 110 - #define MINSIGSTKSZ 2048 111 - #define SIGSTKSZ 8192 112 - 113 - #include <asm-generic/signal.h> 114 - 115 - #ifdef __KERNEL__ 116 - struct old_sigaction { 117 - __sighandler_t sa_handler; 118 - old_sigset_t sa_mask; 119 - unsigned long sa_flags; 120 - __sigrestore_t sa_restorer; 121 - }; 122 - 123 - struct sigaction { 124 - __sighandler_t sa_handler; 125 - unsigned long sa_flags; 126 - __sigrestore_t sa_restorer; 127 - sigset_t sa_mask; /* mask last for extensibility */ 128 - }; 129 - 130 - struct k_sigaction { 131 - struct sigaction sa; 132 - }; 133 - #else 134 - /* Here we must cater to libcs that poke about in kernel headers. */ 135 - 136 - struct sigaction { 137 - union { 138 - __sighandler_t _sa_handler; 139 - void (*_sa_sigaction)(int, struct siginfo *, void *); 140 - } _u; 141 - sigset_t sa_mask; 142 - unsigned long sa_flags; 143 - void (*sa_restorer)(void); 144 - }; 145 - 146 - #define sa_handler _u._sa_handler 147 - #define sa_sigaction _u._sa_sigaction 148 - 149 - #endif /* __KERNEL__ */ 150 - 151 - typedef struct sigaltstack { 152 - void __user *ss_sp; 153 - int ss_flags; 154 - size_t ss_size; 155 - } stack_t; 156 - 157 - #ifdef __KERNEL__ 158 - #include <asm/sigcontext.h> 159 - 160 - #define __HAVE_ARCH_SIG_BITOPS 161 - 162 - #define sigaddset(set,sig) \ 163 - (__builtin_constant_p(sig) ? \ 164 - __const_sigaddset((set),(sig)) : \ 165 - __gen_sigaddset((set),(sig))) 166 - 167 - static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) 168 - { 169 - __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); 170 - } 171 - 172 - static __inline__ void __const_sigaddset(sigset_t *set, int _sig) 173 - { 174 - unsigned long sig = _sig - 1; 175 - set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); 176 - } 177 - 178 - #define sigdelset(set,sig) \ 179 - (__builtin_constant_p(sig) ? \ 180 - __const_sigdelset((set),(sig)) : \ 181 - __gen_sigdelset((set),(sig))) 182 - 183 - 184 - static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) 185 - { 186 - __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); 187 - } 188 - 189 - static __inline__ void __const_sigdelset(sigset_t *set, int _sig) 190 - { 191 - unsigned long sig = _sig - 1; 192 - set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); 193 - } 194 - 195 - static __inline__ int __const_sigismember(sigset_t *set, int _sig) 196 - { 197 - unsigned long sig = _sig - 1; 198 - return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); 199 - } 200 - 201 - static __inline__ int __gen_sigismember(sigset_t *set, int _sig) 202 - { 203 - int ret; 204 - __asm__("btl %2,%1\n\tsbbl %0,%0" 205 - : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); 206 - return ret; 207 - } 208 - 209 - #define sigismember(set,sig) \ 210 - (__builtin_constant_p(sig) ? 
\ 211 - __const_sigismember((set),(sig)) : \ 212 - __gen_sigismember((set),(sig))) 213 - 214 - static __inline__ int sigfindinword(unsigned long word) 215 - { 216 - __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); 217 - return word; 218 - } 219 - 220 - struct pt_regs; 221 - 222 - #define ptrace_signal_deliver(regs, cookie) \ 223 - do { \ 224 - if (current->ptrace & PT_DTRACE) { \ 225 - current->ptrace &= ~PT_DTRACE; \ 226 - (regs)->eflags &= ~TF_MASK; \ 227 - } \ 228 - } while (0) 229 - 230 - #endif /* __KERNEL__ */ 231 - 232 - #endif
-181
include/asm-x86/signal_64.h
··· 1 - #ifndef _ASMx8664_SIGNAL_H
2 - #define _ASMx8664_SIGNAL_H
3 -
4 - #ifndef __ASSEMBLY__
5 - #include <linux/types.h>
6 - #include <linux/time.h>
7 -
8 - /* Avoid too many header ordering problems. */
9 - struct siginfo;
10 -
11 - #ifdef __KERNEL__
12 - #include <linux/linkage.h>
13 - /* Most things should be clean enough to redefine this at will, if care
14 -    is taken to make libc match. */
15 -
16 - #define _NSIG 64
17 - #define _NSIG_BPW 64
18 - #define _NSIG_WORDS (_NSIG / _NSIG_BPW)
19 -
20 - typedef unsigned long old_sigset_t; /* at least 32 bits */
21 -
22 - typedef struct {
23 - 	unsigned long sig[_NSIG_WORDS];
24 - } sigset_t;
25 -
26 -
27 - #else
28 - /* Here we must cater to libcs that poke about in kernel headers. */
29 -
30 - #define NSIG 32
31 - typedef unsigned long sigset_t;
32 -
33 - #endif /* __KERNEL__ */
34 - #endif
35 -
36 - #define SIGHUP 1
37 - #define SIGINT 2
38 - #define SIGQUIT 3
39 - #define SIGILL 4
40 - #define SIGTRAP 5
41 - #define SIGABRT 6
42 - #define SIGIOT 6
43 - #define SIGBUS 7
44 - #define SIGFPE 8
45 - #define SIGKILL 9
46 - #define SIGUSR1 10
47 - #define SIGSEGV 11
48 - #define SIGUSR2 12
49 - #define SIGPIPE 13
50 - #define SIGALRM 14
51 - #define SIGTERM 15
52 - #define SIGSTKFLT 16
53 - #define SIGCHLD 17
54 - #define SIGCONT 18
55 - #define SIGSTOP 19
56 - #define SIGTSTP 20
57 - #define SIGTTIN 21
58 - #define SIGTTOU 22
59 - #define SIGURG 23
60 - #define SIGXCPU 24
61 - #define SIGXFSZ 25
62 - #define SIGVTALRM 26
63 - #define SIGPROF 27
64 - #define SIGWINCH 28
65 - #define SIGIO 29
66 - #define SIGPOLL SIGIO
67 - /*
68 - #define SIGLOST 29
69 - */
70 - #define SIGPWR 30
71 - #define SIGSYS 31
72 - #define SIGUNUSED 31
73 -
74 - /* These should not be considered constants from userland. */
75 - #define SIGRTMIN 32
76 - #define SIGRTMAX _NSIG
77 -
78 - /*
79 - * SA_FLAGS values:
80 - *
81 - * SA_ONSTACK indicates that a registered stack_t will be used.
82 - * SA_RESTART flag to get restarting signals (which were the default long ago)
83 - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
84 - * SA_RESETHAND clears the handler when the signal is delivered.
85 - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
86 - * SA_NODEFER prevents the current signal from being masked in the handler.
87 - *
88 - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
89 - * Unix names RESETHAND and NODEFER respectively.
90 - */
91 - #define SA_NOCLDSTOP 0x00000001
92 - #define SA_NOCLDWAIT 0x00000002
93 - #define SA_SIGINFO 0x00000004
94 - #define SA_ONSTACK 0x08000000
95 - #define SA_RESTART 0x10000000
96 - #define SA_NODEFER 0x40000000
97 - #define SA_RESETHAND 0x80000000
98 -
99 - #define SA_NOMASK SA_NODEFER
100 - #define SA_ONESHOT SA_RESETHAND
101 -
102 - #define SA_RESTORER 0x04000000
103 -
104 - /*
105 - * sigaltstack controls
106 - */
107 - #define SS_ONSTACK 1
108 - #define SS_DISABLE 2
109 -
110 - #define MINSIGSTKSZ 2048
111 - #define SIGSTKSZ 8192
112 -
113 - #include <asm-generic/signal.h>
114 -
115 - #ifndef __ASSEMBLY__
116 -
117 - struct sigaction {
118 - 	__sighandler_t sa_handler;
119 - 	unsigned long sa_flags;
120 - 	__sigrestore_t sa_restorer;
121 - 	sigset_t sa_mask; /* mask last for extensibility */
122 - };
123 -
124 - struct k_sigaction {
125 - 	struct sigaction sa;
126 - };
127 -
128 - typedef struct sigaltstack {
129 - 	void __user *ss_sp;
130 - 	int ss_flags;
131 - 	size_t ss_size;
132 - } stack_t;
133 -
134 - #ifdef __KERNEL__
135 - #include <asm/sigcontext.h>
136 -
137 - #undef __HAVE_ARCH_SIG_BITOPS
138 - #if 0
139 -
140 - static inline void sigaddset(sigset_t *set, int _sig)
141 - {
142 - 	__asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
143 - }
144 -
145 - static inline void sigdelset(sigset_t *set, int _sig)
146 - {
147 - 	__asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
148 - }
149 -
150 - static inline int __const_sigismember(sigset_t *set, int _sig)
151 - {
152 - 	unsigned long sig = _sig - 1;
153 - 	return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1)));
154 - }
155 -
156 - static inline int __gen_sigismember(sigset_t *set, int _sig)
157 - {
158 - 	int ret;
159 - 	__asm__("btq %2,%1\n\tsbbq %0,%0"
160 - 		: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
161 - 	return ret;
162 - }
163 -
164 - #define sigismember(set,sig) \
165 - 	(__builtin_constant_p(sig) ? \
166 - 	__const_sigismember((set),(sig)) : \
167 - 	__gen_sigismember((set),(sig)))
168 -
169 - static inline int sigfindinword(unsigned long word)
170 - {
171 - 	__asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
172 - 	return word;
173 - }
174 - #endif
175 - #endif
176 -
177 - #define ptrace_signal_deliver(regs, cookie) do { } while (0)
178 -
179 - #endif /* __KERNEL__ */
180 -
181 - #endif
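The SA_FLAGS comment reproduced in the removed header above is the closest thing to documentation these bits get, so a concrete use may help. The userspace sketch below installs a SIGUSR1 handler with SA_SIGINFO | SA_RESTART: the three-argument handler form, plus transparent restart of restartable syscalls interrupted by the signal. It uses only the standard POSIX sigaction() API; names are illustrative:

	#include <signal.h>
	#include <string.h>
	#include <unistd.h>

	static volatile sig_atomic_t got_usr1;

	/* SA_SIGINFO selects this three-argument form over plain sa_handler */
	static void on_usr1(int sig, siginfo_t *info, void *ucontext)
	{
		(void)sig; (void)info; (void)ucontext;
		got_usr1 = 1;	/* async-signal-safe work only */
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = on_usr1;
		sigemptyset(&sa.sa_mask);	/* block nothing extra in the handler */
		sa.sa_flags = SA_SIGINFO | SA_RESTART;
		if (sigaction(SIGUSR1, &sa, NULL) < 0)
			return 1;

		pause();	/* deliver SIGUSR1 to run the handler; pause() then returns */
		return !got_usr1;
	}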
+2
include/asm-x86/smp_64.h
··· 76 76
77 77 #endif /* CONFIG_SMP */
78 78
79 + #define safe_smp_processor_id() smp_processor_id()
80 +
79 81 static inline int hard_smp_processor_id(void)
80 82 {
81 83 	/* we don't want to mark this access volatile - bad code generation */
+112 -11
include/asm-x86/stat.h
··· 1 - #ifdef __KERNEL__
2 - # ifdef CONFIG_X86_32
3 - # include "stat_32.h"
4 - # else
5 - # include "stat_64.h"
6 - # endif
1 + #ifndef _ASM_X86_STAT_H
2 + #define _ASM_X86_STAT_H
3 +
4 + #define STAT_HAVE_NSEC 1
5 +
6 + #ifdef __i386__
7 + struct stat {
8 + 	unsigned long st_dev;
9 + 	unsigned long st_ino;
10 + 	unsigned short st_mode;
11 + 	unsigned short st_nlink;
12 + 	unsigned short st_uid;
13 + 	unsigned short st_gid;
14 + 	unsigned long st_rdev;
15 + 	unsigned long st_size;
16 + 	unsigned long st_blksize;
17 + 	unsigned long st_blocks;
18 + 	unsigned long st_atime;
19 + 	unsigned long st_atime_nsec;
20 + 	unsigned long st_mtime;
21 + 	unsigned long st_mtime_nsec;
22 + 	unsigned long st_ctime;
23 + 	unsigned long st_ctime_nsec;
24 + 	unsigned long __unused4;
25 + 	unsigned long __unused5;
26 + };
27 +
28 + #define STAT64_HAS_BROKEN_ST_INO 1
29 +
30 + /* This matches struct stat64 in glibc2.1, hence the absolutely
31 + * insane amounts of padding around dev_t's.
32 + */
33 + struct stat64 {
34 + 	unsigned long long st_dev;
35 + 	unsigned char __pad0[4];
36 +
37 + 	unsigned long __st_ino;
38 +
39 + 	unsigned int st_mode;
40 + 	unsigned int st_nlink;
41 +
42 + 	unsigned long st_uid;
43 + 	unsigned long st_gid;
44 +
45 + 	unsigned long long st_rdev;
46 + 	unsigned char __pad3[4];
47 +
48 + 	long long st_size;
49 + 	unsigned long st_blksize;
50 +
51 + 	/* Number 512-byte blocks allocated. */
52 + 	unsigned long long st_blocks;
53 +
54 + 	unsigned long st_atime;
55 + 	unsigned long st_atime_nsec;
56 +
57 + 	unsigned long st_mtime;
58 + 	unsigned int st_mtime_nsec;
59 +
60 + 	unsigned long st_ctime;
61 + 	unsigned long st_ctime_nsec;
62 +
63 + 	unsigned long long st_ino;
64 + };
65 +
66 + #else /* __i386__ */
67 +
68 + struct stat {
69 + 	unsigned long st_dev;
70 + 	unsigned long st_ino;
71 + 	unsigned long st_nlink;
72 +
73 + 	unsigned int st_mode;
74 + 	unsigned int st_uid;
75 + 	unsigned int st_gid;
76 + 	unsigned int __pad0;
77 + 	unsigned long st_rdev;
78 + 	long st_size;
79 + 	long st_blksize;
80 + 	long st_blocks; /* Number 512-byte blocks allocated. */
81 +
82 + 	unsigned long st_atime;
83 + 	unsigned long st_atime_nsec;
84 + 	unsigned long st_mtime;
85 + 	unsigned long st_mtime_nsec;
86 + 	unsigned long st_ctime;
87 + 	unsigned long st_ctime_nsec;
88 + 	long __unused[3];
89 + };
90 + #endif
91 +
92 + /* for 32bit emulation and 32 bit kernels */
93 + struct __old_kernel_stat {
94 + 	unsigned short st_dev;
95 + 	unsigned short st_ino;
96 + 	unsigned short st_mode;
97 + 	unsigned short st_nlink;
98 + 	unsigned short st_uid;
99 + 	unsigned short st_gid;
100 + 	unsigned short st_rdev;
101 + #ifdef __i386__
102 + 	unsigned long st_size;
103 + 	unsigned long st_atime;
104 + 	unsigned long st_mtime;
105 + 	unsigned long st_ctime;
7 106 #else
8 - # ifdef __i386__
9 - # include "stat_32.h"
10 - # else
11 - # include "stat_64.h"
12 - # endif
107 + 	unsigned int st_size;
108 + 	unsigned int st_atime;
109 + 	unsigned int st_mtime;
110 + 	unsigned int st_ctime;
111 + #endif
112 + };
113 +
13 114 #endif
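The glibc-2.1 padding the comment above apologizes for is easy to mis-read, so here is a small self-contained check of the layout it implies on a 32-bit build: st_dev is 64 bits wide, the explicit __pad0[4] preserves glibc's expected spacing, and __st_ino lands at byte 12. The struct below is a truncated stand-in for illustration, not <asm/stat.h> itself:

	#include <stdio.h>
	#include <stddef.h>

	/* Leading members of the i386 struct stat64 above (truncated stand-in) */
	struct stat64_head {
		unsigned long long st_dev;
		unsigned char __pad0[4];
		unsigned long __st_ino;
		unsigned int st_mode;
		unsigned int st_nlink;
	};

	int main(void)
	{
		/* On i386 (4-byte long, 4-byte struct alignment for long long)
		 * this prints: __pad0=8 __st_ino=12 st_mode=16 */
		printf("__pad0=%zu __st_ino=%zu st_mode=%zu\n",
		       offsetof(struct stat64_head, __pad0),
		       offsetof(struct stat64_head, __st_ino),
		       offsetof(struct stat64_head, st_mode));
		return 0;
	}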
-77
include/asm-x86/stat_32.h
··· 1 - #ifndef _I386_STAT_H
2 - #define _I386_STAT_H
3 -
4 - struct __old_kernel_stat {
5 - 	unsigned short st_dev;
6 - 	unsigned short st_ino;
7 - 	unsigned short st_mode;
8 - 	unsigned short st_nlink;
9 - 	unsigned short st_uid;
10 - 	unsigned short st_gid;
11 - 	unsigned short st_rdev;
12 - 	unsigned long st_size;
13 - 	unsigned long st_atime;
14 - 	unsigned long st_mtime;
15 - 	unsigned long st_ctime;
16 - };
17 -
18 - struct stat {
19 - 	unsigned long st_dev;
20 - 	unsigned long st_ino;
21 - 	unsigned short st_mode;
22 - 	unsigned short st_nlink;
23 - 	unsigned short st_uid;
24 - 	unsigned short st_gid;
25 - 	unsigned long st_rdev;
26 - 	unsigned long st_size;
27 - 	unsigned long st_blksize;
28 - 	unsigned long st_blocks;
29 - 	unsigned long st_atime;
30 - 	unsigned long st_atime_nsec;
31 - 	unsigned long st_mtime;
32 - 	unsigned long st_mtime_nsec;
33 - 	unsigned long st_ctime;
34 - 	unsigned long st_ctime_nsec;
35 - 	unsigned long __unused4;
36 - 	unsigned long __unused5;
37 - };
38 -
39 - /* This matches struct stat64 in glibc2.1, hence the absolutely
40 - * insane amounts of padding around dev_t's.
41 - */
42 - struct stat64 {
43 - 	unsigned long long st_dev;
44 - 	unsigned char __pad0[4];
45 -
46 - #define STAT64_HAS_BROKEN_ST_INO 1
47 - 	unsigned long __st_ino;
48 -
49 - 	unsigned int st_mode;
50 - 	unsigned int st_nlink;
51 -
52 - 	unsigned long st_uid;
53 - 	unsigned long st_gid;
54 -
55 - 	unsigned long long st_rdev;
56 - 	unsigned char __pad3[4];
57 -
58 - 	long long st_size;
59 - 	unsigned long st_blksize;
60 -
61 - 	unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
62 -
63 - 	unsigned long st_atime;
64 - 	unsigned long st_atime_nsec;
65 -
66 - 	unsigned long st_mtime;
67 - 	unsigned int st_mtime_nsec;
68 -
69 - 	unsigned long st_ctime;
70 - 	unsigned long st_ctime_nsec;
71 -
72 - 	unsigned long long st_ino;
73 - };
74 -
75 - #define STAT_HAVE_NSEC 1
76 -
77 - #endif
-44
include/asm-x86/stat_64.h
··· 1 - #ifndef _ASM_X86_64_STAT_H
2 - #define _ASM_X86_64_STAT_H
3 -
4 - #define STAT_HAVE_NSEC 1
5 -
6 - struct stat {
7 - 	unsigned long st_dev;
8 - 	unsigned long st_ino;
9 - 	unsigned long st_nlink;
10 -
11 - 	unsigned int st_mode;
12 - 	unsigned int st_uid;
13 - 	unsigned int st_gid;
14 - 	unsigned int __pad0;
15 - 	unsigned long st_rdev;
16 - 	long st_size;
17 - 	long st_blksize;
18 - 	long st_blocks; /* Number 512-byte blocks allocated. */
19 -
20 - 	unsigned long st_atime;
21 - 	unsigned long st_atime_nsec;
22 - 	unsigned long st_mtime;
23 - 	unsigned long st_mtime_nsec;
24 - 	unsigned long st_ctime;
25 - 	unsigned long st_ctime_nsec;
26 - 	long __unused[3];
27 - };
28 -
29 - /* For 32bit emulation */
30 - struct __old_kernel_stat {
31 - 	unsigned short st_dev;
32 - 	unsigned short st_ino;
33 - 	unsigned short st_mode;
34 - 	unsigned short st_nlink;
35 - 	unsigned short st_uid;
36 - 	unsigned short st_gid;
37 - 	unsigned short st_rdev;
38 - 	unsigned int st_size;
39 - 	unsigned int st_atime;
40 - 	unsigned int st_mtime;
41 - 	unsigned int st_ctime;
42 - };
43 -
44 - #endif
+61 -11
include/asm-x86/statfs.h
··· 1 - #ifdef __KERNEL__
2 - # ifdef CONFIG_X86_32
3 - # include "statfs_32.h"
4 - # else
5 - # include "statfs_64.h"
6 - # endif
1 + #ifndef _ASM_X86_STATFS_H
2 + #define _ASM_X86_STATFS_H
3 +
4 + #ifdef __i386__
5 + #include <asm-generic/statfs.h>
7 6 #else
8 - # ifdef __i386__
9 - # include "statfs_32.h"
10 - # else
11 - # include "statfs_64.h"
12 - # endif
7 +
8 + #ifndef __KERNEL_STRICT_NAMES
9 +
10 + #include <linux/types.h>
11 +
12 + typedef __kernel_fsid_t fsid_t;
13 +
14 + #endif
15 +
16 + /*
17 + * This is ugly -- we're already 64-bit clean, so just duplicate the
18 + * definitions.
19 + */
20 + struct statfs {
21 + 	long f_type;
22 + 	long f_bsize;
23 + 	long f_blocks;
24 + 	long f_bfree;
25 + 	long f_bavail;
26 + 	long f_files;
27 + 	long f_ffree;
28 + 	__kernel_fsid_t f_fsid;
29 + 	long f_namelen;
30 + 	long f_frsize;
31 + 	long f_spare[5];
32 + };
33 +
34 + struct statfs64 {
35 + 	long f_type;
36 + 	long f_bsize;
37 + 	long f_blocks;
38 + 	long f_bfree;
39 + 	long f_bavail;
40 + 	long f_files;
41 + 	long f_ffree;
42 + 	__kernel_fsid_t f_fsid;
43 + 	long f_namelen;
44 + 	long f_frsize;
45 + 	long f_spare[5];
46 + };
47 +
48 + struct compat_statfs64 {
49 + 	__u32 f_type;
50 + 	__u32 f_bsize;
51 + 	__u64 f_blocks;
52 + 	__u64 f_bfree;
53 + 	__u64 f_bavail;
54 + 	__u64 f_files;
55 + 	__u64 f_ffree;
56 + 	__kernel_fsid_t f_fsid;
57 + 	__u32 f_namelen;
58 + 	__u32 f_frsize;
59 + 	__u32 f_spare[5];
60 + } __attribute__((packed));
61 +
62 + #endif /* !__i386__ */
13 63 #endif
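The "already 64-bit clean" comment above means statfs and statfs64 come out byte-for-byte identical on x86-64, so duplicating the definition costs nothing. One compile-time way to state that invariant, shown here with local stand-ins for the kernel types (illustration only, not the real header):

	/* Local stand-in for __kernel_fsid_t */
	typedef struct { int val[2]; } demo_fsid_t;

	struct demo_statfs {
		long f_type, f_bsize, f_blocks, f_bfree, f_bavail;
		long f_files, f_ffree;
		demo_fsid_t f_fsid;
		long f_namelen, f_frsize, f_spare[5];
	};

	/* Same members, same order -- the "64-bit clean" duplicate */
	struct demo_statfs64 {
		long f_type, f_bsize, f_blocks, f_bfree, f_bavail;
		long f_files, f_ffree;
		demo_fsid_t f_fsid;
		long f_namelen, f_frsize, f_spare[5];
	};

	/* C11 static assertion; compilation fails if the layouts ever diverge */
	_Static_assert(sizeof(struct demo_statfs) == sizeof(struct demo_statfs64),
		       "statfs and statfs64 must coincide on 64-bit");

	int main(void) { return 0; }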
-6
include/asm-x86/statfs_32.h
··· 1 - #ifndef _I386_STATFS_H
2 - #define _I386_STATFS_H
3 -
4 - #include <asm-generic/statfs.h>
5 -
6 - #endif
-58
include/asm-x86/statfs_64.h
··· 1 - #ifndef _X86_64_STATFS_H
2 - #define _X86_64_STATFS_H
3 -
4 - #ifndef __KERNEL_STRICT_NAMES
5 -
6 - #include <linux/types.h>
7 -
8 - typedef __kernel_fsid_t fsid_t;
9 -
10 - #endif
11 -
12 - /*
13 - * This is ugly -- we're already 64-bit clean, so just duplicate the
14 - * definitions.
15 - */
16 - struct statfs {
17 - 	long f_type;
18 - 	long f_bsize;
19 - 	long f_blocks;
20 - 	long f_bfree;
21 - 	long f_bavail;
22 - 	long f_files;
23 - 	long f_ffree;
24 - 	__kernel_fsid_t f_fsid;
25 - 	long f_namelen;
26 - 	long f_frsize;
27 - 	long f_spare[5];
28 - };
29 -
30 - struct statfs64 {
31 - 	long f_type;
32 - 	long f_bsize;
33 - 	long f_blocks;
34 - 	long f_bfree;
35 - 	long f_bavail;
36 - 	long f_files;
37 - 	long f_ffree;
38 - 	__kernel_fsid_t f_fsid;
39 - 	long f_namelen;
40 - 	long f_frsize;
41 - 	long f_spare[5];
42 - };
43 -
44 - struct compat_statfs64 {
45 - 	__u32 f_type;
46 - 	__u32 f_bsize;
47 - 	__u64 f_blocks;
48 - 	__u64 f_bfree;
49 - 	__u64 f_bavail;
50 - 	__u64 f_files;
51 - 	__u64 f_ffree;
52 - 	__kernel_fsid_t f_fsid;
53 - 	__u32 f_namelen;
54 - 	__u32 f_frsize;
55 - 	__u32 f_spare[5];
56 - } __attribute__((packed));
57 -
58 - #endif
+7 -16
include/asm-x86/suspend_64.h
··· 3 3 * Based on code
4 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 5 */
6 + #ifndef __ASM_X86_64_SUSPEND_H
7 + #define __ASM_X86_64_SUSPEND_H
8 +
6 9 #include <asm/desc.h>
7 10 #include <asm/i387.h>
··· 15 12 	return 0;
16 13 }
17 14
18 - /* Image of the saved processor state. If you touch this, fix acpi_wakeup.S. */
15 + /* Image of the saved processor state. If you touch this, fix acpi/wakeup.S. */
19 16 struct saved_context {
17 + 	struct pt_regs regs;
20 18 	u16 ds, es, fs, gs, ss;
21 19 	unsigned long gs_base, gs_kernel_base, fs_base;
22 20 	unsigned long cr0, cr2, cr3, cr4, cr8;
··· 33 29 	unsigned long tr;
34 30 	unsigned long safety;
35 31 	unsigned long return_address;
36 - 	unsigned long eflags;
37 32 } __attribute__((packed));
38 -
39 - /* We'll access these from assembly, so we'd better have them outside struct */
40 - extern unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
41 - extern unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
42 - extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
43 - extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
44 - extern unsigned long saved_context_eflags;
45 33
46 34 #define loaddebug(thread,register) \
47 35 	set_debugreg((thread)->debugreg##register, register)
48 36
49 37 extern void fix_processor_context(void);
50 38
51 - extern unsigned long saved_rip;
52 - extern unsigned long saved_rsp;
53 - extern unsigned long saved_rbp;
54 - extern unsigned long saved_rbx;
55 - extern unsigned long saved_rsi;
56 - extern unsigned long saved_rdi;
57 -
58 39 /* routines for saving/restoring kernel state */
59 40 extern int acpi_save_state_mem(void);
60 41 extern char core_restore_code;
61 42 extern char restore_registers;
43 +
44 + #endif /* __ASM_X86_64_SUSPEND_H */
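With the general-purpose registers folded into saved_context as a struct pt_regs member, the wakeup assembly can no longer name saved_context_eax and friends directly; it has to address fields by their byte offset from the start of the structure. The usual way to keep such offsets in sync with the C layout is to generate them from C, asm-offsets style. A sketch of the idea with a toy struct (names hypothetical, not the real saved_context or the kernel's asm-offsets.c):

	#include <stdio.h>
	#include <stddef.h>

	/* Toy stand-in for a context structure that assembly must index into */
	struct toy_context {
		unsigned long regs[16];		/* stands in for struct pt_regs */
		unsigned short ds, es, fs, gs, ss;
		unsigned long cr3;
	};

	/* Emit one #define per member offset, consumable by .S files */
	#define OFFSET(sym, str, mem) \
		printf("#define %s %zu\n", #sym, offsetof(struct str, mem))

	int main(void)
	{
		OFFSET(CTX_REGS, toy_context, regs);
		OFFSET(CTX_DS,   toy_context, ds);
		OFFSET(CTX_CR3,  toy_context, cr3);
		return 0;
	}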