Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ia64/xen: Remove Xen support for ia64

ia64 has not been supported by Xen since Xen 4.2, so it is time to drop
Xen/ia64 support from Linux as well.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

Authored by Boris Ostrovsky and committed by Tony Luck.
d52eefb4 374b1057

+2 -5118
-12
arch/ia64/Kconfig
··· 147 147 over full virtualization. However, when run without a hypervisor 148 148 the kernel is theoretically slower and slightly larger. 149 149 150 - 151 - source "arch/ia64/xen/Kconfig" 152 - 153 150 endif 154 151 155 152 choice ··· 172 175 SGI-SN2 For SGI Altix systems 173 176 SGI-UV For SGI UV systems 174 177 Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> 175 - Xen-domU For xen domU system 176 178 177 179 If you don't know what to do, choose "generic". 178 180 ··· 226 230 config IA64_HP_SIM 227 231 bool "Ski-simulator" 228 232 select SWIOTLB 229 - 230 - config IA64_XEN_GUEST 231 - bool "Xen guest" 232 - select SWIOTLB 233 - depends on XEN 234 - help 235 - Build a kernel that runs on Xen guest domain. At this moment only 236 - 16KB page size in supported. 237 233 238 234 endchoice 239 235
-2
arch/ia64/Makefile
··· 51 51 core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ 52 52 core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 53 53 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ 54 - core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/ 55 54 core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ 56 55 core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ 57 56 core-$(CONFIG_KVM) += arch/ia64/kvm/ 58 - core-$(CONFIG_XEN) += arch/ia64/xen/ 59 57 60 58 drivers-$(CONFIG_PCI) += arch/ia64/pci/ 61 59 drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
-199
arch/ia64/configs/xen_domu_defconfig
··· 1 - CONFIG_EXPERIMENTAL=y 2 - CONFIG_SYSVIPC=y 3 - CONFIG_POSIX_MQUEUE=y 4 - CONFIG_IKCONFIG=y 5 - CONFIG_IKCONFIG_PROC=y 6 - CONFIG_LOG_BUF_SHIFT=20 7 - CONFIG_SYSFS_DEPRECATED_V2=y 8 - CONFIG_BLK_DEV_INITRD=y 9 - CONFIG_KALLSYMS_ALL=y 10 - CONFIG_MODULES=y 11 - CONFIG_MODULE_UNLOAD=y 12 - CONFIG_MODVERSIONS=y 13 - CONFIG_MODULE_SRCVERSION_ALL=y 14 - # CONFIG_BLK_DEV_BSG is not set 15 - CONFIG_PARAVIRT_GUEST=y 16 - CONFIG_IA64_XEN_GUEST=y 17 - CONFIG_MCKINLEY=y 18 - CONFIG_IA64_CYCLONE=y 19 - CONFIG_SMP=y 20 - CONFIG_NR_CPUS=16 21 - CONFIG_HOTPLUG_CPU=y 22 - CONFIG_PERMIT_BSP_REMOVE=y 23 - CONFIG_FORCE_CPEI_RETARGET=y 24 - CONFIG_IA64_MCA_RECOVERY=y 25 - CONFIG_PERFMON=y 26 - CONFIG_IA64_PALINFO=y 27 - CONFIG_KEXEC=y 28 - CONFIG_EFI_VARS=y 29 - CONFIG_BINFMT_MISC=m 30 - CONFIG_ACPI_PROCFS=y 31 - CONFIG_ACPI_BUTTON=m 32 - CONFIG_ACPI_FAN=m 33 - CONFIG_ACPI_PROCESSOR=m 34 - CONFIG_ACPI_CONTAINER=m 35 - CONFIG_HOTPLUG_PCI=y 36 - CONFIG_HOTPLUG_PCI_ACPI=m 37 - CONFIG_PACKET=y 38 - CONFIG_UNIX=y 39 - CONFIG_INET=y 40 - CONFIG_IP_MULTICAST=y 41 - CONFIG_ARPD=y 42 - CONFIG_SYN_COOKIES=y 43 - # CONFIG_INET_LRO is not set 44 - # CONFIG_IPV6 is not set 45 - CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 46 - CONFIG_BLK_DEV_LOOP=m 47 - CONFIG_BLK_DEV_CRYPTOLOOP=m 48 - CONFIG_BLK_DEV_NBD=m 49 - CONFIG_BLK_DEV_RAM=y 50 - CONFIG_IDE=y 51 - CONFIG_BLK_DEV_IDECD=y 52 - CONFIG_BLK_DEV_GENERIC=y 53 - CONFIG_BLK_DEV_CMD64X=y 54 - CONFIG_BLK_DEV_PIIX=y 55 - CONFIG_SCSI=y 56 - CONFIG_BLK_DEV_SD=y 57 - CONFIG_CHR_DEV_ST=m 58 - CONFIG_BLK_DEV_SR=m 59 - CONFIG_CHR_DEV_SG=m 60 - CONFIG_SCSI_SYM53C8XX_2=y 61 - CONFIG_SCSI_QLOGIC_1280=y 62 - CONFIG_MD=y 63 - CONFIG_BLK_DEV_MD=m 64 - CONFIG_MD_LINEAR=m 65 - CONFIG_MD_RAID0=m 66 - CONFIG_MD_RAID1=m 67 - CONFIG_MD_MULTIPATH=m 68 - CONFIG_BLK_DEV_DM=m 69 - CONFIG_DM_CRYPT=m 70 - CONFIG_DM_SNAPSHOT=m 71 - CONFIG_DM_MIRROR=m 72 - CONFIG_DM_ZERO=m 73 - CONFIG_FUSION=y 74 - CONFIG_FUSION_SPI=y 75 - CONFIG_FUSION_FC=y 76 - CONFIG_FUSION_CTL=y 77 - 
CONFIG_NETDEVICES=y 78 - CONFIG_DUMMY=m 79 - CONFIG_NET_ETHERNET=y 80 - CONFIG_NET_TULIP=y 81 - CONFIG_TULIP=m 82 - CONFIG_NET_PCI=y 83 - CONFIG_NET_VENDOR_INTEL=y 84 - CONFIG_E100=m 85 - CONFIG_E1000=y 86 - CONFIG_TIGON3=y 87 - CONFIG_NETCONSOLE=y 88 - # CONFIG_SERIO_SERPORT is not set 89 - CONFIG_GAMEPORT=m 90 - CONFIG_SERIAL_NONSTANDARD=y 91 - CONFIG_SERIAL_8250=y 92 - CONFIG_SERIAL_8250_CONSOLE=y 93 - CONFIG_SERIAL_8250_NR_UARTS=6 94 - CONFIG_SERIAL_8250_EXTENDED=y 95 - CONFIG_SERIAL_8250_SHARE_IRQ=y 96 - # CONFIG_HW_RANDOM is not set 97 - CONFIG_EFI_RTC=y 98 - CONFIG_RAW_DRIVER=m 99 - CONFIG_HPET=y 100 - CONFIG_AGP=m 101 - CONFIG_DRM=m 102 - CONFIG_DRM_TDFX=m 103 - CONFIG_DRM_R128=m 104 - CONFIG_DRM_RADEON=m 105 - CONFIG_DRM_MGA=m 106 - CONFIG_DRM_SIS=m 107 - CONFIG_HID_GYRATION=y 108 - CONFIG_HID_NTRIG=y 109 - CONFIG_HID_PANTHERLORD=y 110 - CONFIG_HID_PETALYNX=y 111 - CONFIG_HID_SAMSUNG=y 112 - CONFIG_HID_SONY=y 113 - CONFIG_HID_SUNPLUS=y 114 - CONFIG_HID_TOPSEED=y 115 - CONFIG_USB=y 116 - CONFIG_USB_DEVICEFS=y 117 - CONFIG_USB_EHCI_HCD=m 118 - CONFIG_USB_OHCI_HCD=m 119 - CONFIG_USB_UHCI_HCD=y 120 - CONFIG_USB_STORAGE=m 121 - CONFIG_EXT2_FS=y 122 - CONFIG_EXT2_FS_XATTR=y 123 - CONFIG_EXT2_FS_POSIX_ACL=y 124 - CONFIG_EXT2_FS_SECURITY=y 125 - CONFIG_EXT3_FS=y 126 - CONFIG_EXT3_FS_POSIX_ACL=y 127 - CONFIG_EXT3_FS_SECURITY=y 128 - CONFIG_REISERFS_FS=y 129 - CONFIG_REISERFS_FS_XATTR=y 130 - CONFIG_REISERFS_FS_POSIX_ACL=y 131 - CONFIG_REISERFS_FS_SECURITY=y 132 - CONFIG_XFS_FS=y 133 - CONFIG_AUTOFS_FS=y 134 - CONFIG_AUTOFS4_FS=y 135 - CONFIG_ISO9660_FS=m 136 - CONFIG_JOLIET=y 137 - CONFIG_UDF_FS=m 138 - CONFIG_VFAT_FS=y 139 - CONFIG_NTFS_FS=m 140 - CONFIG_PROC_KCORE=y 141 - CONFIG_TMPFS=y 142 - CONFIG_HUGETLBFS=y 143 - CONFIG_NFS_FS=m 144 - CONFIG_NFS_V3=y 145 - CONFIG_NFS_V4=y 146 - CONFIG_NFSD=m 147 - CONFIG_NFSD_V4=y 148 - CONFIG_SMB_FS=m 149 - CONFIG_SMB_NLS_DEFAULT=y 150 - CONFIG_CIFS=m 151 - CONFIG_PARTITION_ADVANCED=y 152 - CONFIG_SGI_PARTITION=y 153 - 
CONFIG_EFI_PARTITION=y 154 - CONFIG_NLS_CODEPAGE_437=y 155 - CONFIG_NLS_CODEPAGE_737=m 156 - CONFIG_NLS_CODEPAGE_775=m 157 - CONFIG_NLS_CODEPAGE_850=m 158 - CONFIG_NLS_CODEPAGE_852=m 159 - CONFIG_NLS_CODEPAGE_855=m 160 - CONFIG_NLS_CODEPAGE_857=m 161 - CONFIG_NLS_CODEPAGE_860=m 162 - CONFIG_NLS_CODEPAGE_861=m 163 - CONFIG_NLS_CODEPAGE_862=m 164 - CONFIG_NLS_CODEPAGE_863=m 165 - CONFIG_NLS_CODEPAGE_864=m 166 - CONFIG_NLS_CODEPAGE_865=m 167 - CONFIG_NLS_CODEPAGE_866=m 168 - CONFIG_NLS_CODEPAGE_869=m 169 - CONFIG_NLS_CODEPAGE_936=m 170 - CONFIG_NLS_CODEPAGE_950=m 171 - CONFIG_NLS_CODEPAGE_932=m 172 - CONFIG_NLS_CODEPAGE_949=m 173 - CONFIG_NLS_CODEPAGE_874=m 174 - CONFIG_NLS_ISO8859_8=m 175 - CONFIG_NLS_CODEPAGE_1250=m 176 - CONFIG_NLS_CODEPAGE_1251=m 177 - CONFIG_NLS_ISO8859_1=y 178 - CONFIG_NLS_ISO8859_2=m 179 - CONFIG_NLS_ISO8859_3=m 180 - CONFIG_NLS_ISO8859_4=m 181 - CONFIG_NLS_ISO8859_5=m 182 - CONFIG_NLS_ISO8859_6=m 183 - CONFIG_NLS_ISO8859_7=m 184 - CONFIG_NLS_ISO8859_9=m 185 - CONFIG_NLS_ISO8859_13=m 186 - CONFIG_NLS_ISO8859_14=m 187 - CONFIG_NLS_ISO8859_15=m 188 - CONFIG_NLS_KOI8_R=m 189 - CONFIG_NLS_KOI8_U=m 190 - CONFIG_NLS_UTF8=m 191 - CONFIG_MAGIC_SYSRQ=y 192 - CONFIG_DEBUG_KERNEL=y 193 - CONFIG_DEBUG_MUTEXES=y 194 - # CONFIG_RCU_CPU_STALL_DETECTOR is not set 195 - CONFIG_IA64_GRANULE_16MB=y 196 - CONFIG_CRYPTO_ECB=m 197 - CONFIG_CRYPTO_PCBC=m 198 - CONFIG_CRYPTO_MD5=y 199 - # CONFIG_CRYPTO_ANSI_CPRNG is not set
-2
arch/ia64/include/asm/acpi.h
··· 111 111 return "uv"; 112 112 # elif defined (CONFIG_IA64_DIG) 113 113 return "dig"; 114 - # elif defined (CONFIG_IA64_XEN_GUEST) 115 - return "xen"; 116 114 # elif defined(CONFIG_IA64_DIG_VTD) 117 115 return "dig_vtd"; 118 116 # else
-2
arch/ia64/include/asm/machvec.h
··· 113 113 # include <asm/machvec_sn2.h> 114 114 # elif defined (CONFIG_IA64_SGI_UV) 115 115 # include <asm/machvec_uv.h> 116 - # elif defined (CONFIG_IA64_XEN_GUEST) 117 - # include <asm/machvec_xen.h> 118 116 # elif defined (CONFIG_IA64_GENERIC) 119 117 120 118 # ifdef MACHVEC_PLATFORM_HEADER
-22
arch/ia64/include/asm/machvec_xen.h
··· 1 - #ifndef _ASM_IA64_MACHVEC_XEN_h 2 - #define _ASM_IA64_MACHVEC_XEN_h 3 - 4 - extern ia64_mv_setup_t dig_setup; 5 - extern ia64_mv_cpu_init_t xen_cpu_init; 6 - extern ia64_mv_irq_init_t xen_irq_init; 7 - extern ia64_mv_send_ipi_t xen_platform_send_ipi; 8 - 9 - /* 10 - * This stuff has dual use! 11 - * 12 - * For a generic kernel, the macros are used to initialize the 13 - * platform's machvec structure. When compiling a non-generic kernel, 14 - * the macros are used directly. 15 - */ 16 - #define ia64_platform_name "xen" 17 - #define platform_setup dig_setup 18 - #define platform_cpu_init xen_cpu_init 19 - #define platform_irq_init xen_irq_init 20 - #define platform_send_ipi xen_platform_send_ipi 21 - 22 - #endif /* _ASM_IA64_MACHVEC_XEN_h */
-1
arch/ia64/include/asm/meminit.h
··· 18 18 * - crash dumping code reserved region 19 19 * - Kernel memory map built from EFI memory map 20 20 * - ELF core header 21 - * - xen start info if CONFIG_XEN 22 21 * 23 22 * More could be added if necessary 24 23 */
-1
arch/ia64/include/asm/paravirt.h
··· 75 75 #ifdef CONFIG_PARAVIRT_GUEST 76 76 77 77 #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 78 - #define PARAVIRT_HYPERVISOR_TYPE_XEN 1 79 78 80 79 #ifndef __ASSEMBLY__ 81 80
+1 -1
arch/ia64/include/asm/pvclock-abi.h
··· 11 11 /* 12 12 * These structs MUST NOT be changed. 13 13 * They are the ABI between hypervisor and guest OS. 14 - * Both Xen and KVM are using this. 14 + * KVM is using this. 15 15 * 16 16 * pvclock_vcpu_time_info holds the system time and the tsc timestamp 17 17 * of the last update. So the guest can use the tsc delta to get a
-51
arch/ia64/include/asm/sync_bitops.h
··· 1 - #ifndef _ASM_IA64_SYNC_BITOPS_H 2 - #define _ASM_IA64_SYNC_BITOPS_H 3 - 4 - /* 5 - * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp> 6 - * 7 - * Based on synch_bitops.h which Dan Magenhaimer wrote. 8 - * 9 - * bit operations which provide guaranteed strong synchronisation 10 - * when communicating with Xen or other guest OSes running on other CPUs. 11 - */ 12 - 13 - static inline void sync_set_bit(int nr, volatile void *addr) 14 - { 15 - set_bit(nr, addr); 16 - } 17 - 18 - static inline void sync_clear_bit(int nr, volatile void *addr) 19 - { 20 - clear_bit(nr, addr); 21 - } 22 - 23 - static inline void sync_change_bit(int nr, volatile void *addr) 24 - { 25 - change_bit(nr, addr); 26 - } 27 - 28 - static inline int sync_test_and_set_bit(int nr, volatile void *addr) 29 - { 30 - return test_and_set_bit(nr, addr); 31 - } 32 - 33 - static inline int sync_test_and_clear_bit(int nr, volatile void *addr) 34 - { 35 - return test_and_clear_bit(nr, addr); 36 - } 37 - 38 - static inline int sync_test_and_change_bit(int nr, volatile void *addr) 39 - { 40 - return test_and_change_bit(nr, addr); 41 - } 42 - 43 - static inline int sync_test_bit(int nr, const volatile void *addr) 44 - { 45 - return test_bit(nr, addr); 46 - } 47 - 48 - #define sync_cmpxchg(ptr, old, new) \ 49 - ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new))) 50 - 51 - #endif /* _ASM_IA64_SYNC_BITOPS_H */
-41
arch/ia64/include/asm/xen/events.h
··· 1 - /****************************************************************************** 2 - * arch/ia64/include/asm/xen/events.h 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - #ifndef _ASM_IA64_XEN_EVENTS_H 23 - #define _ASM_IA64_XEN_EVENTS_H 24 - 25 - enum ipi_vector { 26 - XEN_RESCHEDULE_VECTOR, 27 - XEN_IPI_VECTOR, 28 - XEN_CMCP_VECTOR, 29 - XEN_CPEP_VECTOR, 30 - 31 - XEN_NR_IPIS, 32 - }; 33 - 34 - static inline int xen_irqs_disabled(struct pt_regs *regs) 35 - { 36 - return !(ia64_psr(regs)->i); 37 - } 38 - 39 - #define irq_ctx_init(cpu) do { } while (0) 40 - 41 - #endif /* _ASM_IA64_XEN_EVENTS_H */
-265
arch/ia64/include/asm/xen/hypercall.h
··· 1 - /****************************************************************************** 2 - * hypercall.h 3 - * 4 - * Linux-specific hypervisor handling. 5 - * 6 - * Copyright (c) 2002-2004, K A Fraser 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License version 2 10 - * as published by the Free Software Foundation; or, when distributed 11 - * separately from the Linux kernel or incorporated into other 12 - * software packages, subject to the following license: 13 - * 14 - * Permission is hereby granted, free of charge, to any person obtaining a copy 15 - * of this source file (the "Software"), to deal in the Software without 16 - * restriction, including without limitation the rights to use, copy, modify, 17 - * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 - * and to permit persons to whom the Software is furnished to do so, subject to 19 - * the following conditions: 20 - * 21 - * The above copyright notice and this permission notice shall be included in 22 - * all copies or substantial portions of the Software. 23 - * 24 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 - * IN THE SOFTWARE. 
31 - */ 32 - 33 - #ifndef _ASM_IA64_XEN_HYPERCALL_H 34 - #define _ASM_IA64_XEN_HYPERCALL_H 35 - 36 - #include <xen/interface/xen.h> 37 - #include <xen/interface/physdev.h> 38 - #include <xen/interface/sched.h> 39 - #include <asm/xen/xcom_hcall.h> 40 - struct xencomm_handle; 41 - extern unsigned long __hypercall(unsigned long a1, unsigned long a2, 42 - unsigned long a3, unsigned long a4, 43 - unsigned long a5, unsigned long cmd); 44 - 45 - /* 46 - * Assembler stubs for hyper-calls. 47 - */ 48 - 49 - #define _hypercall0(type, name) \ 50 - ({ \ 51 - long __res; \ 52 - __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\ 53 - (type)__res; \ 54 - }) 55 - 56 - #define _hypercall1(type, name, a1) \ 57 - ({ \ 58 - long __res; \ 59 - __res = __hypercall((unsigned long)a1, \ 60 - 0, 0, 0, 0, __HYPERVISOR_##name); \ 61 - (type)__res; \ 62 - }) 63 - 64 - #define _hypercall2(type, name, a1, a2) \ 65 - ({ \ 66 - long __res; \ 67 - __res = __hypercall((unsigned long)a1, \ 68 - (unsigned long)a2, \ 69 - 0, 0, 0, __HYPERVISOR_##name); \ 70 - (type)__res; \ 71 - }) 72 - 73 - #define _hypercall3(type, name, a1, a2, a3) \ 74 - ({ \ 75 - long __res; \ 76 - __res = __hypercall((unsigned long)a1, \ 77 - (unsigned long)a2, \ 78 - (unsigned long)a3, \ 79 - 0, 0, __HYPERVISOR_##name); \ 80 - (type)__res; \ 81 - }) 82 - 83 - #define _hypercall4(type, name, a1, a2, a3, a4) \ 84 - ({ \ 85 - long __res; \ 86 - __res = __hypercall((unsigned long)a1, \ 87 - (unsigned long)a2, \ 88 - (unsigned long)a3, \ 89 - (unsigned long)a4, \ 90 - 0, __HYPERVISOR_##name); \ 91 - (type)__res; \ 92 - }) 93 - 94 - #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ 95 - ({ \ 96 - long __res; \ 97 - __res = __hypercall((unsigned long)a1, \ 98 - (unsigned long)a2, \ 99 - (unsigned long)a3, \ 100 - (unsigned long)a4, \ 101 - (unsigned long)a5, \ 102 - __HYPERVISOR_##name); \ 103 - (type)__res; \ 104 - }) 105 - 106 - 107 - static inline int 108 - xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle 
*arg) 109 - { 110 - return _hypercall2(int, sched_op, cmd, arg); 111 - } 112 - 113 - static inline long 114 - HYPERVISOR_set_timer_op(u64 timeout) 115 - { 116 - unsigned long timeout_hi = (unsigned long)(timeout >> 32); 117 - unsigned long timeout_lo = (unsigned long)timeout; 118 - return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); 119 - } 120 - 121 - static inline int 122 - xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list, 123 - int nr_calls) 124 - { 125 - return _hypercall2(int, multicall, call_list, nr_calls); 126 - } 127 - 128 - static inline int 129 - xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg) 130 - { 131 - return _hypercall2(int, memory_op, cmd, arg); 132 - } 133 - 134 - static inline int 135 - xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg) 136 - { 137 - return _hypercall2(int, event_channel_op, cmd, arg); 138 - } 139 - 140 - static inline int 141 - xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg) 142 - { 143 - return _hypercall2(int, xen_version, cmd, arg); 144 - } 145 - 146 - static inline int 147 - xencomm_arch_hypercall_console_io(int cmd, int count, 148 - struct xencomm_handle *str) 149 - { 150 - return _hypercall3(int, console_io, cmd, count, str); 151 - } 152 - 153 - static inline int 154 - xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg) 155 - { 156 - return _hypercall2(int, physdev_op, cmd, arg); 157 - } 158 - 159 - static inline int 160 - xencomm_arch_hypercall_grant_table_op(unsigned int cmd, 161 - struct xencomm_handle *uop, 162 - unsigned int count) 163 - { 164 - return _hypercall3(int, grant_table_op, cmd, uop, count); 165 - } 166 - 167 - int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); 168 - 169 - extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg); 170 - 171 - static inline int 172 - xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg) 
173 - { 174 - return _hypercall2(int, callback_op, cmd, arg); 175 - } 176 - 177 - static inline long 178 - xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg) 179 - { 180 - return _hypercall3(long, vcpu_op, cmd, cpu, arg); 181 - } 182 - 183 - static inline int 184 - HYPERVISOR_physdev_op(int cmd, void *arg) 185 - { 186 - switch (cmd) { 187 - case PHYSDEVOP_eoi: 188 - return _hypercall1(int, ia64_fast_eoi, 189 - ((struct physdev_eoi *)arg)->irq); 190 - default: 191 - return xencomm_hypercall_physdev_op(cmd, arg); 192 - } 193 - } 194 - 195 - static inline long 196 - xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg) 197 - { 198 - return _hypercall1(long, opt_feature, arg); 199 - } 200 - 201 - /* for balloon driver */ 202 - #define HYPERVISOR_update_va_mapping(va, new_val, flags) (0) 203 - 204 - /* Use xencomm to do hypercalls. */ 205 - #define HYPERVISOR_sched_op xencomm_hypercall_sched_op 206 - #define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op 207 - #define HYPERVISOR_callback_op xencomm_hypercall_callback_op 208 - #define HYPERVISOR_multicall xencomm_hypercall_multicall 209 - #define HYPERVISOR_xen_version xencomm_hypercall_xen_version 210 - #define HYPERVISOR_console_io xencomm_hypercall_console_io 211 - #define HYPERVISOR_memory_op xencomm_hypercall_memory_op 212 - #define HYPERVISOR_suspend xencomm_hypercall_suspend 213 - #define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op 214 - #define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature 215 - 216 - /* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */ 217 - #define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; }) 218 - 219 - static inline int 220 - HYPERVISOR_shutdown( 221 - unsigned int reason) 222 - { 223 - struct sched_shutdown sched_shutdown = { 224 - .reason = reason 225 - }; 226 - 227 - int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); 228 - 229 - return rc; 230 - } 231 - 232 - /* for netfront.c, netback.c */ 
233 - #define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */ 234 - 235 - static inline void 236 - MULTI_update_va_mapping( 237 - struct multicall_entry *mcl, unsigned long va, 238 - pte_t new_val, unsigned long flags) 239 - { 240 - mcl->op = __HYPERVISOR_update_va_mapping; 241 - mcl->result = 0; 242 - } 243 - 244 - static inline void 245 - MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, 246 - void *uop, unsigned int count) 247 - { 248 - mcl->op = __HYPERVISOR_grant_table_op; 249 - mcl->args[0] = cmd; 250 - mcl->args[1] = (unsigned long)uop; 251 - mcl->args[2] = count; 252 - } 253 - 254 - static inline void 255 - MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, 256 - int count, int *success_count, domid_t domid) 257 - { 258 - mcl->op = __HYPERVISOR_mmu_update; 259 - mcl->args[0] = (unsigned long)req; 260 - mcl->args[1] = count; 261 - mcl->args[2] = (unsigned long)success_count; 262 - mcl->args[3] = domid; 263 - } 264 - 265 - #endif /* _ASM_IA64_XEN_HYPERCALL_H */
-61
arch/ia64/include/asm/xen/hypervisor.h
··· 1 - /****************************************************************************** 2 - * hypervisor.h 3 - * 4 - * Linux-specific hypervisor handling. 5 - * 6 - * Copyright (c) 2002-2004, K A Fraser 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License version 2 10 - * as published by the Free Software Foundation; or, when distributed 11 - * separately from the Linux kernel or incorporated into other 12 - * software packages, subject to the following license: 13 - * 14 - * Permission is hereby granted, free of charge, to any person obtaining a copy 15 - * of this source file (the "Software"), to deal in the Software without 16 - * restriction, including without limitation the rights to use, copy, modify, 17 - * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 - * and to permit persons to whom the Software is furnished to do so, subject to 19 - * the following conditions: 20 - * 21 - * The above copyright notice and this permission notice shall be included in 22 - * all copies or substantial portions of the Software. 23 - * 24 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 - * IN THE SOFTWARE. 
31 - */ 32 - 33 - #ifndef _ASM_IA64_XEN_HYPERVISOR_H 34 - #define _ASM_IA64_XEN_HYPERVISOR_H 35 - 36 - #include <linux/err.h> 37 - #include <xen/interface/xen.h> 38 - #include <xen/interface/version.h> /* to compile feature.c */ 39 - #include <xen/features.h> /* to comiple xen-netfront.c */ 40 - #include <xen/xen.h> 41 - #include <asm/xen/hypercall.h> 42 - 43 - #ifdef CONFIG_XEN 44 - extern struct shared_info *HYPERVISOR_shared_info; 45 - extern struct start_info *xen_start_info; 46 - 47 - void __init xen_setup_vcpu_info_placement(void); 48 - void force_evtchn_callback(void); 49 - 50 - /* for drivers/xen/balloon/balloon.c */ 51 - #ifdef CONFIG_XEN_SCRUB_PAGES 52 - #define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT) 53 - #else 54 - #define scrub_pages(_p, _n) ((void)0) 55 - #endif 56 - 57 - /* For setup_arch() in arch/ia64/kernel/setup.c */ 58 - void xen_ia64_enable_opt_feature(void); 59 - #endif 60 - 61 - #endif /* _ASM_IA64_XEN_HYPERVISOR_H */
-486
arch/ia64/include/asm/xen/inst.h
··· 1 - /****************************************************************************** 2 - * arch/ia64/include/asm/xen/inst.h 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #include <asm/xen/privop.h> 24 - 25 - #define ia64_ivt xen_ivt 26 - #define DO_SAVE_MIN XEN_DO_SAVE_MIN 27 - 28 - #define __paravirt_switch_to xen_switch_to 29 - #define __paravirt_leave_syscall xen_leave_syscall 30 - #define __paravirt_work_processed_syscall xen_work_processed_syscall 31 - #define __paravirt_leave_kernel xen_leave_kernel 32 - #define __paravirt_pending_syscall_end xen_work_pending_syscall_end 33 - #define __paravirt_work_processed_syscall_target \ 34 - xen_work_processed_syscall 35 - 36 - #define paravirt_fsyscall_table xen_fsyscall_table 37 - #define paravirt_fsys_bubble_down xen_fsys_bubble_down 38 - 39 - #define MOV_FROM_IFA(reg) \ 40 - movl reg = XSI_IFA; \ 41 - ;; \ 42 - ld8 reg = [reg] 43 - 44 - #define MOV_FROM_ITIR(reg) \ 45 - movl reg = XSI_ITIR; \ 46 - ;; \ 47 - ld8 reg = [reg] 48 - 49 - #define MOV_FROM_ISR(reg) \ 50 - movl reg = XSI_ISR; \ 51 - ;; \ 52 - ld8 reg = [reg] 53 - 54 - #define MOV_FROM_IHA(reg) \ 55 - movl reg = 
XSI_IHA; \ 56 - ;; \ 57 - ld8 reg = [reg] 58 - 59 - #define MOV_FROM_IPSR(pred, reg) \ 60 - (pred) movl reg = XSI_IPSR; \ 61 - ;; \ 62 - (pred) ld8 reg = [reg] 63 - 64 - #define MOV_FROM_IIM(reg) \ 65 - movl reg = XSI_IIM; \ 66 - ;; \ 67 - ld8 reg = [reg] 68 - 69 - #define MOV_FROM_IIP(reg) \ 70 - movl reg = XSI_IIP; \ 71 - ;; \ 72 - ld8 reg = [reg] 73 - 74 - .macro __MOV_FROM_IVR reg, clob 75 - .ifc "\reg", "r8" 76 - XEN_HYPER_GET_IVR 77 - .exitm 78 - .endif 79 - .ifc "\clob", "r8" 80 - XEN_HYPER_GET_IVR 81 - ;; 82 - mov \reg = r8 83 - .exitm 84 - .endif 85 - 86 - mov \clob = r8 87 - ;; 88 - XEN_HYPER_GET_IVR 89 - ;; 90 - mov \reg = r8 91 - ;; 92 - mov r8 = \clob 93 - .endm 94 - #define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob 95 - 96 - .macro __MOV_FROM_PSR pred, reg, clob 97 - .ifc "\reg", "r8" 98 - (\pred) XEN_HYPER_GET_PSR; 99 - .exitm 100 - .endif 101 - .ifc "\clob", "r8" 102 - (\pred) XEN_HYPER_GET_PSR 103 - ;; 104 - (\pred) mov \reg = r8 105 - .exitm 106 - .endif 107 - 108 - (\pred) mov \clob = r8 109 - (\pred) XEN_HYPER_GET_PSR 110 - ;; 111 - (\pred) mov \reg = r8 112 - (\pred) mov r8 = \clob 113 - .endm 114 - #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob 115 - 116 - /* assuming ar.itc is read with interrupt disabled. 
*/ 117 - #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ 118 - (pred) movl clob = XSI_ITC_OFFSET; \ 119 - ;; \ 120 - (pred) ld8 clob = [clob]; \ 121 - (pred) mov reg = ar.itc; \ 122 - ;; \ 123 - (pred) add reg = reg, clob; \ 124 - ;; \ 125 - (pred) movl clob = XSI_ITC_LAST; \ 126 - ;; \ 127 - (pred) ld8 clob = [clob]; \ 128 - ;; \ 129 - (pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ 130 - ;; \ 131 - (pred_clob) add reg = 1, clob; \ 132 - ;; \ 133 - (pred) movl clob = XSI_ITC_LAST; \ 134 - ;; \ 135 - (pred) st8 [clob] = reg 136 - 137 - 138 - #define MOV_TO_IFA(reg, clob) \ 139 - movl clob = XSI_IFA; \ 140 - ;; \ 141 - st8 [clob] = reg \ 142 - 143 - #define MOV_TO_ITIR(pred, reg, clob) \ 144 - (pred) movl clob = XSI_ITIR; \ 145 - ;; \ 146 - (pred) st8 [clob] = reg 147 - 148 - #define MOV_TO_IHA(pred, reg, clob) \ 149 - (pred) movl clob = XSI_IHA; \ 150 - ;; \ 151 - (pred) st8 [clob] = reg 152 - 153 - #define MOV_TO_IPSR(pred, reg, clob) \ 154 - (pred) movl clob = XSI_IPSR; \ 155 - ;; \ 156 - (pred) st8 [clob] = reg; \ 157 - ;; 158 - 159 - #define MOV_TO_IFS(pred, reg, clob) \ 160 - (pred) movl clob = XSI_IFS; \ 161 - ;; \ 162 - (pred) st8 [clob] = reg; \ 163 - ;; 164 - 165 - #define MOV_TO_IIP(reg, clob) \ 166 - movl clob = XSI_IIP; \ 167 - ;; \ 168 - st8 [clob] = reg 169 - 170 - .macro ____MOV_TO_KR kr, reg, clob0, clob1 171 - .ifc "\clob0", "r9" 172 - .error "clob0 \clob0 must not be r9" 173 - .endif 174 - .ifc "\clob1", "r8" 175 - .error "clob1 \clob1 must not be r8" 176 - .endif 177 - 178 - .ifnc "\reg", "r9" 179 - .ifnc "\clob1", "r9" 180 - mov \clob1 = r9 181 - .endif 182 - mov r9 = \reg 183 - .endif 184 - .ifnc "\clob0", "r8" 185 - mov \clob0 = r8 186 - .endif 187 - mov r8 = \kr 188 - ;; 189 - XEN_HYPER_SET_KR 190 - 191 - .ifnc "\reg", "r9" 192 - .ifnc "\clob1", "r9" 193 - mov r9 = \clob1 194 - .endif 195 - .endif 196 - .ifnc "\clob0", "r8" 197 - mov r8 = \clob0 198 - .endif 199 - .endm 200 - 201 - .macro __MOV_TO_KR kr, reg, clob0, clob1 202 - .ifc 
"\clob0", "r9" 203 - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 204 - .exitm 205 - .endif 206 - .ifc "\clob1", "r8" 207 - ____MOV_TO_KR \kr, \reg, \clob1, \clob0 208 - .exitm 209 - .endif 210 - 211 - ____MOV_TO_KR \kr, \reg, \clob0, \clob1 212 - .endm 213 - 214 - #define MOV_TO_KR(kr, reg, clob0, clob1) \ 215 - __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1 216 - 217 - 218 - .macro __ITC_I pred, reg, clob 219 - .ifc "\reg", "r8" 220 - (\pred) XEN_HYPER_ITC_I 221 - .exitm 222 - .endif 223 - .ifc "\clob", "r8" 224 - (\pred) mov r8 = \reg 225 - ;; 226 - (\pred) XEN_HYPER_ITC_I 227 - .exitm 228 - .endif 229 - 230 - (\pred) mov \clob = r8 231 - (\pred) mov r8 = \reg 232 - ;; 233 - (\pred) XEN_HYPER_ITC_I 234 - ;; 235 - (\pred) mov r8 = \clob 236 - ;; 237 - .endm 238 - #define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob 239 - 240 - .macro __ITC_D pred, reg, clob 241 - .ifc "\reg", "r8" 242 - (\pred) XEN_HYPER_ITC_D 243 - ;; 244 - .exitm 245 - .endif 246 - .ifc "\clob", "r8" 247 - (\pred) mov r8 = \reg 248 - ;; 249 - (\pred) XEN_HYPER_ITC_D 250 - ;; 251 - .exitm 252 - .endif 253 - 254 - (\pred) mov \clob = r8 255 - (\pred) mov r8 = \reg 256 - ;; 257 - (\pred) XEN_HYPER_ITC_D 258 - ;; 259 - (\pred) mov r8 = \clob 260 - ;; 261 - .endm 262 - #define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob 263 - 264 - .macro __ITC_I_AND_D pred_i, pred_d, reg, clob 265 - .ifc "\reg", "r8" 266 - (\pred_i)XEN_HYPER_ITC_I 267 - ;; 268 - (\pred_d)XEN_HYPER_ITC_D 269 - ;; 270 - .exitm 271 - .endif 272 - .ifc "\clob", "r8" 273 - mov r8 = \reg 274 - ;; 275 - (\pred_i)XEN_HYPER_ITC_I 276 - ;; 277 - (\pred_d)XEN_HYPER_ITC_D 278 - ;; 279 - .exitm 280 - .endif 281 - 282 - mov \clob = r8 283 - mov r8 = \reg 284 - ;; 285 - (\pred_i)XEN_HYPER_ITC_I 286 - ;; 287 - (\pred_d)XEN_HYPER_ITC_D 288 - ;; 289 - mov r8 = \clob 290 - ;; 291 - .endm 292 - #define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ 293 - __ITC_I_AND_D pred_i, pred_d, reg, clob 294 - 295 - .macro __THASH pred, reg0, reg1, clob 296 - .ifc 
"\reg0", "r8" 297 - (\pred) mov r8 = \reg1 298 - (\pred) XEN_HYPER_THASH 299 - .exitm 300 - .endc 301 - .ifc "\reg1", "r8" 302 - (\pred) XEN_HYPER_THASH 303 - ;; 304 - (\pred) mov \reg0 = r8 305 - ;; 306 - .exitm 307 - .endif 308 - .ifc "\clob", "r8" 309 - (\pred) mov r8 = \reg1 310 - (\pred) XEN_HYPER_THASH 311 - ;; 312 - (\pred) mov \reg0 = r8 313 - ;; 314 - .exitm 315 - .endif 316 - 317 - (\pred) mov \clob = r8 318 - (\pred) mov r8 = \reg1 319 - (\pred) XEN_HYPER_THASH 320 - ;; 321 - (\pred) mov \reg0 = r8 322 - (\pred) mov r8 = \clob 323 - ;; 324 - .endm 325 - #define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob 326 - 327 - #define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ 328 - mov clob0 = 1; \ 329 - movl clob1 = XSI_PSR_IC; \ 330 - ;; \ 331 - st4 [clob1] = clob0 \ 332 - ;; 333 - 334 - #define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ 335 - ;; \ 336 - srlz.d; \ 337 - mov clob1 = 1; \ 338 - movl clob0 = XSI_PSR_IC; \ 339 - ;; \ 340 - st4 [clob0] = clob1 341 - 342 - #define RSM_PSR_IC(clob) \ 343 - movl clob = XSI_PSR_IC; \ 344 - ;; \ 345 - st4 [clob] = r0; \ 346 - ;; 347 - 348 - /* pred will be clobbered */ 349 - #define MASK_TO_PEND_OFS (-1) 350 - #define SSM_PSR_I(pred, pred_clob, clob) \ 351 - (pred) movl clob = XSI_PSR_I_ADDR \ 352 - ;; \ 353 - (pred) ld8 clob = [clob] \ 354 - ;; \ 355 - /* if (pred) vpsr.i = 1 */ \ 356 - /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \ 357 - (pred) st1 [clob] = r0, MASK_TO_PEND_OFS \ 358 - ;; \ 359 - /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \ 360 - (pred) ld1 clob = [clob] \ 361 - ;; \ 362 - (pred) cmp.ne.unc pred_clob, p0 = clob, r0 \ 363 - ;; \ 364 - (pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */ 365 - 366 - #define RSM_PSR_I(pred, clob0, clob1) \ 367 - movl clob0 = XSI_PSR_I_ADDR; \ 368 - mov clob1 = 1; \ 369 - ;; \ 370 - ld8 clob0 = [clob0]; \ 371 - ;; \ 372 - (pred) st1 [clob0] = clob1 373 - 374 - #define RSM_PSR_I_IC(clob0, clob1, clob2) \ 375 - movl clob0 = 
XSI_PSR_I_ADDR; \ 376 - movl clob1 = XSI_PSR_IC; \ 377 - ;; \ 378 - ld8 clob0 = [clob0]; \ 379 - mov clob2 = 1; \ 380 - ;; \ 381 - /* note: clears both vpsr.i and vpsr.ic! */ \ 382 - st1 [clob0] = clob2; \ 383 - st4 [clob1] = r0; \ 384 - ;; 385 - 386 - #define RSM_PSR_DT \ 387 - XEN_HYPER_RSM_PSR_DT 388 - 389 - #define RSM_PSR_BE_I(clob0, clob1) \ 390 - RSM_PSR_I(p0, clob0, clob1); \ 391 - rum psr.be 392 - 393 - #define SSM_PSR_DT_AND_SRLZ_I \ 394 - XEN_HYPER_SSM_PSR_DT 395 - 396 - #define BSW_0(clob0, clob1, clob2) \ 397 - ;; \ 398 - /* r16-r31 all now hold bank1 values */ \ 399 - mov clob2 = ar.unat; \ 400 - movl clob0 = XSI_BANK1_R16; \ 401 - movl clob1 = XSI_BANK1_R16 + 8; \ 402 - ;; \ 403 - .mem.offset 0, 0; st8.spill [clob0] = r16, 16; \ 404 - .mem.offset 8, 0; st8.spill [clob1] = r17, 16; \ 405 - ;; \ 406 - .mem.offset 0, 0; st8.spill [clob0] = r18, 16; \ 407 - .mem.offset 8, 0; st8.spill [clob1] = r19, 16; \ 408 - ;; \ 409 - .mem.offset 0, 0; st8.spill [clob0] = r20, 16; \ 410 - .mem.offset 8, 0; st8.spill [clob1] = r21, 16; \ 411 - ;; \ 412 - .mem.offset 0, 0; st8.spill [clob0] = r22, 16; \ 413 - .mem.offset 8, 0; st8.spill [clob1] = r23, 16; \ 414 - ;; \ 415 - .mem.offset 0, 0; st8.spill [clob0] = r24, 16; \ 416 - .mem.offset 8, 0; st8.spill [clob1] = r25, 16; \ 417 - ;; \ 418 - .mem.offset 0, 0; st8.spill [clob0] = r26, 16; \ 419 - .mem.offset 8, 0; st8.spill [clob1] = r27, 16; \ 420 - ;; \ 421 - .mem.offset 0, 0; st8.spill [clob0] = r28, 16; \ 422 - .mem.offset 8, 0; st8.spill [clob1] = r29, 16; \ 423 - ;; \ 424 - .mem.offset 0, 0; st8.spill [clob0] = r30, 16; \ 425 - .mem.offset 8, 0; st8.spill [clob1] = r31, 16; \ 426 - ;; \ 427 - mov clob1 = ar.unat; \ 428 - movl clob0 = XSI_B1NAT; \ 429 - ;; \ 430 - st8 [clob0] = clob1; \ 431 - mov ar.unat = clob2; \ 432 - movl clob0 = XSI_BANKNUM; \ 433 - ;; \ 434 - st4 [clob0] = r0 435 - 436 - 437 - /* FIXME: THIS CODE IS NOT NaT SAFE! 
*/ 438 - #define XEN_BSW_1(clob) \ 439 - mov clob = ar.unat; \ 440 - movl r30 = XSI_B1NAT; \ 441 - ;; \ 442 - ld8 r30 = [r30]; \ 443 - mov r31 = 1; \ 444 - ;; \ 445 - mov ar.unat = r30; \ 446 - movl r30 = XSI_BANKNUM; \ 447 - ;; \ 448 - st4 [r30] = r31; \ 449 - movl r30 = XSI_BANK1_R16; \ 450 - movl r31 = XSI_BANK1_R16+8; \ 451 - ;; \ 452 - ld8.fill r16 = [r30], 16; \ 453 - ld8.fill r17 = [r31], 16; \ 454 - ;; \ 455 - ld8.fill r18 = [r30], 16; \ 456 - ld8.fill r19 = [r31], 16; \ 457 - ;; \ 458 - ld8.fill r20 = [r30], 16; \ 459 - ld8.fill r21 = [r31], 16; \ 460 - ;; \ 461 - ld8.fill r22 = [r30], 16; \ 462 - ld8.fill r23 = [r31], 16; \ 463 - ;; \ 464 - ld8.fill r24 = [r30], 16; \ 465 - ld8.fill r25 = [r31], 16; \ 466 - ;; \ 467 - ld8.fill r26 = [r30], 16; \ 468 - ld8.fill r27 = [r31], 16; \ 469 - ;; \ 470 - ld8.fill r28 = [r30], 16; \ 471 - ld8.fill r29 = [r31], 16; \ 472 - ;; \ 473 - ld8.fill r30 = [r30]; \ 474 - ld8.fill r31 = [r31]; \ 475 - ;; \ 476 - mov ar.unat = clob 477 - 478 - #define BSW_1(clob0, clob1) XEN_BSW_1(clob1) 479 - 480 - 481 - #define COVER \ 482 - XEN_HYPER_COVER 483 - 484 - #define RFI \ 485 - XEN_HYPER_RFI; \ 486 - dv_serialize_data
-363
arch/ia64/include/asm/xen/interface.h
··· 1 - /****************************************************************************** 2 - * arch-ia64/hypervisor-if.h 3 - * 4 - * Guest OS interface to IA64 Xen. 5 - * 6 - * Permission is hereby granted, free of charge, to any person obtaining a copy 7 - * of this software and associated documentation files (the "Software"), to 8 - * deal in the Software without restriction, including without limitation the 9 - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 10 - * sell copies of the Software, and to permit persons to whom the Software is 11 - * furnished to do so, subject to the following conditions: 12 - * 13 - * The above copyright notice and this permission notice shall be included in 14 - * all copies or substantial portions of the Software. 15 - * 16 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 - * DEALINGS IN THE SOFTWARE. 23 - * 24 - * Copyright by those who contributed. 
(in alphabetical order) 25 - * 26 - * Anthony Xu <anthony.xu@intel.com> 27 - * Eddie Dong <eddie.dong@intel.com> 28 - * Fred Yang <fred.yang@intel.com> 29 - * Kevin Tian <kevin.tian@intel.com> 30 - * Alex Williamson <alex.williamson@hp.com> 31 - * Chris Wright <chrisw@sous-sol.org> 32 - * Christian Limpach <Christian.Limpach@cl.cam.ac.uk> 33 - * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com> 34 - * Hollis Blanchard <hollisb@us.ibm.com> 35 - * Isaku Yamahata <yamahata@valinux.co.jp> 36 - * Jan Beulich <jbeulich@novell.com> 37 - * John Levon <john.levon@sun.com> 38 - * Kazuhiro Suzuki <kaz@jp.fujitsu.com> 39 - * Keir Fraser <keir.fraser@citrix.com> 40 - * Kouya Shimura <kouya@jp.fujitsu.com> 41 - * Masaki Kanno <kanno.masaki@jp.fujitsu.com> 42 - * Matt Chapman <matthewc@hp.com> 43 - * Matthew Chapman <matthewc@hp.com> 44 - * Samuel Thibault <samuel.thibault@eu.citrix.com> 45 - * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com> 46 - * Tristan Gingold <tgingold@free.fr> 47 - * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com> 48 - * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com> 49 - * Zhang Xin <xing.z.zhang@intel.com> 50 - * Zhang xiantao <xiantao.zhang@intel.com> 51 - * dan.magenheimer@hp.com 52 - * ian.pratt@cl.cam.ac.uk 53 - * michael.fetterman@cl.cam.ac.uk 54 - */ 55 - 56 - #ifndef _ASM_IA64_XEN_INTERFACE_H 57 - #define _ASM_IA64_XEN_INTERFACE_H 58 - 59 - #define __DEFINE_GUEST_HANDLE(name, type) \ 60 - typedef struct { type *p; } __guest_handle_ ## name 61 - 62 - #define DEFINE_GUEST_HANDLE_STRUCT(name) \ 63 - __DEFINE_GUEST_HANDLE(name, struct name) 64 - #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 65 - #define GUEST_HANDLE(name) __guest_handle_ ## name 66 - #define GUEST_HANDLE_64(name) GUEST_HANDLE(name) 67 - #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) 68 - 69 - #ifndef __ASSEMBLY__ 70 - /* Explicitly size integers that represent pfns in the public interface 71 - * with Xen so that we could have one ABI that works for 
32 and 64 bit 72 - * guests. */ 73 - typedef unsigned long xen_pfn_t; 74 - typedef unsigned long xen_ulong_t; 75 - /* Guest handles for primitive C types. */ 76 - __DEFINE_GUEST_HANDLE(uchar, unsigned char); 77 - __DEFINE_GUEST_HANDLE(uint, unsigned int); 78 - __DEFINE_GUEST_HANDLE(ulong, unsigned long); 79 - 80 - DEFINE_GUEST_HANDLE(char); 81 - DEFINE_GUEST_HANDLE(int); 82 - DEFINE_GUEST_HANDLE(long); 83 - DEFINE_GUEST_HANDLE(void); 84 - DEFINE_GUEST_HANDLE(uint64_t); 85 - DEFINE_GUEST_HANDLE(uint32_t); 86 - 87 - DEFINE_GUEST_HANDLE(xen_pfn_t); 88 - #define PRI_xen_pfn "lx" 89 - #endif 90 - 91 - /* Arch specific VIRQs definition */ 92 - #define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ 93 - #define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ 94 - #define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ 95 - 96 - /* Maximum number of virtual CPUs in multi-processor guests. */ 97 - /* keep sizeof(struct shared_page) <= PAGE_SIZE. 98 - * this is checked in arch/ia64/xen/hypervisor.c. 
*/ 99 - #define MAX_VIRT_CPUS 64 100 - 101 - #ifndef __ASSEMBLY__ 102 - 103 - #define INVALID_MFN (~0UL) 104 - 105 - union vac { 106 - unsigned long value; 107 - struct { 108 - int a_int:1; 109 - int a_from_int_cr:1; 110 - int a_to_int_cr:1; 111 - int a_from_psr:1; 112 - int a_from_cpuid:1; 113 - int a_cover:1; 114 - int a_bsw:1; 115 - long reserved:57; 116 - }; 117 - }; 118 - 119 - union vdc { 120 - unsigned long value; 121 - struct { 122 - int d_vmsw:1; 123 - int d_extint:1; 124 - int d_ibr_dbr:1; 125 - int d_pmc:1; 126 - int d_to_pmd:1; 127 - int d_itm:1; 128 - long reserved:58; 129 - }; 130 - }; 131 - 132 - struct mapped_regs { 133 - union vac vac; 134 - union vdc vdc; 135 - unsigned long virt_env_vaddr; 136 - unsigned long reserved1[29]; 137 - unsigned long vhpi; 138 - unsigned long reserved2[95]; 139 - union { 140 - unsigned long vgr[16]; 141 - unsigned long bank1_regs[16]; /* bank1 regs (r16-r31) 142 - when bank0 active */ 143 - }; 144 - union { 145 - unsigned long vbgr[16]; 146 - unsigned long bank0_regs[16]; /* bank0 regs (r16-r31) 147 - when bank1 active */ 148 - }; 149 - unsigned long vnat; 150 - unsigned long vbnat; 151 - unsigned long vcpuid[5]; 152 - unsigned long reserved3[11]; 153 - unsigned long vpsr; 154 - unsigned long vpr; 155 - unsigned long reserved4[76]; 156 - union { 157 - unsigned long vcr[128]; 158 - struct { 159 - unsigned long dcr; /* CR0 */ 160 - unsigned long itm; 161 - unsigned long iva; 162 - unsigned long rsv1[5]; 163 - unsigned long pta; /* CR8 */ 164 - unsigned long rsv2[7]; 165 - unsigned long ipsr; /* CR16 */ 166 - unsigned long isr; 167 - unsigned long rsv3; 168 - unsigned long iip; 169 - unsigned long ifa; 170 - unsigned long itir; 171 - unsigned long iipa; 172 - unsigned long ifs; 173 - unsigned long iim; /* CR24 */ 174 - unsigned long iha; 175 - unsigned long rsv4[38]; 176 - unsigned long lid; /* CR64 */ 177 - unsigned long ivr; 178 - unsigned long tpr; 179 - unsigned long eoi; 180 - unsigned long irr[4]; 181 - unsigned long 
itv; /* CR72 */ 182 - unsigned long pmv; 183 - unsigned long cmcv; 184 - unsigned long rsv5[5]; 185 - unsigned long lrr0; /* CR80 */ 186 - unsigned long lrr1; 187 - unsigned long rsv6[46]; 188 - }; 189 - }; 190 - union { 191 - unsigned long reserved5[128]; 192 - struct { 193 - unsigned long precover_ifs; 194 - unsigned long unat; /* not sure if this is needed 195 - until NaT arch is done */ 196 - int interrupt_collection_enabled; /* virtual psr.ic */ 197 - 198 - /* virtual interrupt deliverable flag is 199 - * evtchn_upcall_mask in shared info area now. 200 - * interrupt_mask_addr is the address 201 - * of evtchn_upcall_mask for current vcpu 202 - */ 203 - unsigned char *interrupt_mask_addr; 204 - int pending_interruption; 205 - unsigned char vpsr_pp; 206 - unsigned char vpsr_dfh; 207 - unsigned char hpsr_dfh; 208 - unsigned char hpsr_mfh; 209 - unsigned long reserved5_1[4]; 210 - int metaphysical_mode; /* 1 = use metaphys mapping 211 - 0 = use virtual */ 212 - int banknum; /* 0 or 1, which virtual 213 - register bank is active */ 214 - unsigned long rrs[8]; /* region registers */ 215 - unsigned long krs[8]; /* kernel registers */ 216 - unsigned long tmp[16]; /* temp registers 217 - (e.g. for hyperprivops) */ 218 - 219 - /* itc paravirtualization 220 - * vAR.ITC = mAR.ITC + itc_offset 221 - * itc_last is one which was lastly passed to 222 - * the guest OS in order to prevent it from 223 - * going backwords. 224 - */ 225 - unsigned long itc_offset; 226 - unsigned long itc_last; 227 - }; 228 - }; 229 - }; 230 - 231 - struct arch_vcpu_info { 232 - /* nothing */ 233 - }; 234 - 235 - /* 236 - * This structure is used for magic page in domain pseudo physical address 237 - * space and the result of XENMEM_machine_memory_map. 238 - * As the XENMEM_machine_memory_map result, 239 - * xen_memory_map::nr_entries indicates the size in bytes 240 - * including struct xen_ia64_memmap_info. Not the number of entries. 
241 - */ 242 - struct xen_ia64_memmap_info { 243 - uint64_t efi_memmap_size; /* size of EFI memory map */ 244 - uint64_t efi_memdesc_size; /* size of an EFI memory map 245 - * descriptor */ 246 - uint32_t efi_memdesc_version; /* memory descriptor version */ 247 - void *memdesc[0]; /* array of efi_memory_desc_t */ 248 - }; 249 - 250 - struct arch_shared_info { 251 - /* PFN of the start_info page. */ 252 - unsigned long start_info_pfn; 253 - 254 - /* Interrupt vector for event channel. */ 255 - int evtchn_vector; 256 - 257 - /* PFN of memmap_info page */ 258 - unsigned int memmap_info_num_pages; /* currently only = 1 case is 259 - supported. */ 260 - unsigned long memmap_info_pfn; 261 - 262 - uint64_t pad[31]; 263 - }; 264 - 265 - struct xen_callback { 266 - unsigned long ip; 267 - }; 268 - typedef struct xen_callback xen_callback_t; 269 - 270 - #endif /* !__ASSEMBLY__ */ 271 - 272 - #include <asm/pvclock-abi.h> 273 - 274 - /* Size of the shared_info area (this is not related to page size). */ 275 - #define XSI_SHIFT 14 276 - #define XSI_SIZE (1 << XSI_SHIFT) 277 - /* Log size of mapped_regs area (64 KB - only 4KB is used). */ 278 - #define XMAPPEDREGS_SHIFT 12 279 - #define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) 280 - /* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ 281 - #define XMAPPEDREGS_OFS XSI_SIZE 282 - 283 - /* Hyperprivops. 
*/ 284 - #define HYPERPRIVOP_START 0x1 285 - #define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) 286 - #define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) 287 - #define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) 288 - #define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) 289 - #define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) 290 - #define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) 291 - #define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) 292 - #define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) 293 - #define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) 294 - #define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) 295 - #define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) 296 - #define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) 297 - #define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) 298 - #define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) 299 - #define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) 300 - #define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) 301 - #define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) 302 - #define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) 303 - #define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) 304 - #define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) 305 - #define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) 306 - #define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) 307 - #define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) 308 - #define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) 309 - #define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) 310 - #define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) 311 - #define HYPERPRIVOP_MAX (0x1a) 312 - 313 - /* Fast and light hypercalls. */ 314 - #define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 315 - 316 - /* Xencomm macros. */ 317 - #define XENCOMM_INLINE_MASK 0xf800000000000000UL 318 - #define XENCOMM_INLINE_FLAG 0x8000000000000000UL 319 - 320 - #ifndef __ASSEMBLY__ 321 - 322 - /* 323 - * Optimization features. 
324 - * The hypervisor may do some special optimizations for guests. This hypercall 325 - * can be used to switch on/of these special optimizations. 326 - */ 327 - #define __HYPERVISOR_opt_feature 0x700UL 328 - 329 - #define XEN_IA64_OPTF_OFF 0x0 330 - #define XEN_IA64_OPTF_ON 0x1 331 - 332 - /* 333 - * If this feature is switched on, the hypervisor inserts the 334 - * tlb entries without calling the guests traphandler. 335 - * This is useful in guests using region 7 for identity mapping 336 - * like the linux kernel does. 337 - */ 338 - #define XEN_IA64_OPTF_IDENT_MAP_REG7 1 339 - 340 - /* Identity mapping of region 4 addresses in HVM. */ 341 - #define XEN_IA64_OPTF_IDENT_MAP_REG4 2 342 - 343 - /* Identity mapping of region 5 addresses in HVM. */ 344 - #define XEN_IA64_OPTF_IDENT_MAP_REG5 3 345 - 346 - #define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) 347 - 348 - struct xen_ia64_opt_feature { 349 - unsigned long cmd; /* Which feature */ 350 - unsigned char on; /* Switch feature on/off */ 351 - union { 352 - struct { 353 - /* The page protection bit mask of the pte. 354 - * This will be or'ed with the pte. */ 355 - unsigned long pgprot; 356 - unsigned long key; /* A protection key for itir.*/ 357 - }; 358 - }; 359 - }; 360 - 361 - #endif /* __ASSEMBLY__ */ 362 - 363 - #endif /* _ASM_IA64_XEN_INTERFACE_H */
-44
arch/ia64/include/asm/xen/irq.h
··· 1 - /****************************************************************************** 2 - * arch/ia64/include/asm/xen/irq.h 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #ifndef _ASM_IA64_XEN_IRQ_H 24 - #define _ASM_IA64_XEN_IRQ_H 25 - 26 - /* 27 - * The flat IRQ space is divided into two regions: 28 - * 1. A one-to-one mapping of real physical IRQs. This space is only used 29 - * if we have physical device-access privilege. This region is at the 30 - * start of the IRQ space so that existing device drivers do not need 31 - * to be modified to translate physical IRQ numbers into our IRQ space. 32 - * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These 33 - * are bound using the provided bind/unbind functions. 34 - */ 35 - 36 - #define XEN_PIRQ_BASE 0 37 - #define XEN_NR_PIRQS 256 38 - 39 - #define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS) 40 - #define XEN_NR_DYNIRQS (NR_CPUS * 8) 41 - 42 - #define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS) 43 - 44 - #endif /* _ASM_IA64_XEN_IRQ_H */
-143
arch/ia64/include/asm/xen/minstate.h
··· 1 - 2 - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 3 - /* read ar.itc in advance, and use it before leaving bank 0 */ 4 - #define XEN_ACCOUNT_GET_STAMP \ 5 - MOV_FROM_ITC(pUStk, p6, r20, r2); 6 - #else 7 - #define XEN_ACCOUNT_GET_STAMP 8 - #endif 9 - 10 - /* 11 - * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves 12 - * the minimum state necessary that allows us to turn psr.ic back 13 - * on. 14 - * 15 - * Assumed state upon entry: 16 - * psr.ic: off 17 - * r31: contains saved predicates (pr) 18 - * 19 - * Upon exit, the state is as follows: 20 - * psr.ic: off 21 - * r2 = points to &pt_regs.r16 22 - * r8 = contents of ar.ccv 23 - * r9 = contents of ar.csd 24 - * r10 = contents of ar.ssd 25 - * r11 = FPSR_DEFAULT 26 - * r12 = kernel sp (kernel virtual address) 27 - * r13 = points to current task_struct (kernel virtual address) 28 - * p15 = TRUE if psr.i is set in cr.ipsr 29 - * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: 30 - * preserved 31 - * CONFIG_XEN note: p6/p7 are not preserved 32 - * 33 - * Note that psr.ic is NOT turned on by this macro. This is so that 34 - * we can pass interruption state as arguments to a handler. 
35 - */ 36 - #define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \ 37 - mov r16=IA64_KR(CURRENT); /* M */ \ 38 - mov r27=ar.rsc; /* M */ \ 39 - mov r20=r1; /* A */ \ 40 - mov r25=ar.unat; /* M */ \ 41 - MOV_FROM_IPSR(p0,r29); /* M */ \ 42 - MOV_FROM_IIP(r28); /* M */ \ 43 - mov r21=ar.fpsr; /* M */ \ 44 - mov r26=ar.pfs; /* I */ \ 45 - __COVER; /* B;; (or nothing) */ \ 46 - adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ 47 - ;; \ 48 - ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \ 49 - st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \ 50 - adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \ 51 - /* switch from user to kernel RBS: */ \ 52 - ;; \ 53 - invala; /* M */ \ 54 - /* SAVE_IFS;*/ /* see xen special handling below */ \ 55 - cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ 56 - ;; \ 57 - (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ 58 - ;; \ 59 - (pUStk) mov.m r24=ar.rnat; \ 60 - (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ 61 - (pKStk) mov r1=sp; /* get sp */ \ 62 - ;; \ 63 - (pUStk) lfetch.fault.excl.nt1 [r22]; \ 64 - (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ 65 - (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ 66 - ;; \ 67 - (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ 68 - (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ 69 - ;; \ 70 - (pUStk) mov r18=ar.bsp; \ 71 - (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ 72 - adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ 73 - adds r16=PT(CR_IPSR),r1; \ 74 - ;; \ 75 - lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ 76 - st8 [r16]=r29; /* save cr.ipsr */ \ 77 - ;; \ 78 - lfetch.fault.excl.nt1 [r17]; \ 79 - tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ 80 - mov r29=b0 \ 81 - ;; \ 82 - WORKAROUND; \ 83 - adds r16=PT(R8),r1; /* initialize first base pointer */ \ 84 - adds 
r17=PT(R9),r1; /* initialize second base pointer */ \ 85 - (pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ 86 - ;; \ 87 - .mem.offset 0,0; st8.spill [r16]=r8,16; \ 88 - .mem.offset 8,0; st8.spill [r17]=r9,16; \ 89 - ;; \ 90 - .mem.offset 0,0; st8.spill [r16]=r10,24; \ 91 - movl r8=XSI_PRECOVER_IFS; \ 92 - .mem.offset 8,0; st8.spill [r17]=r11,24; \ 93 - ;; \ 94 - /* xen special handling for possibly lazy cover */ \ 95 - /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \ 96 - ld8 r30=[r8]; \ 97 - (pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ 98 - st8 [r16]=r28,16; /* save cr.iip */ \ 99 - ;; \ 100 - st8 [r17]=r30,16; /* save cr.ifs */ \ 101 - mov r8=ar.ccv; \ 102 - mov r9=ar.csd; \ 103 - mov r10=ar.ssd; \ 104 - movl r11=FPSR_DEFAULT; /* L-unit */ \ 105 - ;; \ 106 - st8 [r16]=r25,16; /* save ar.unat */ \ 107 - st8 [r17]=r26,16; /* save ar.pfs */ \ 108 - shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ 109 - ;; \ 110 - st8 [r16]=r27,16; /* save ar.rsc */ \ 111 - (pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \ 112 - (pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \ 113 - ;; /* avoid RAW on r16 & r17 */ \ 114 - (pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \ 115 - st8 [r17]=r31,16; /* save predicates */ \ 116 - (pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \ 117 - ;; \ 118 - st8 [r16]=r29,16; /* save b0 */ \ 119 - st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ 120 - cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ 121 - ;; \ 122 - .mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ 123 - .mem.offset 8,0; st8.spill [r17]=r12,16; \ 124 - adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ 125 - ;; \ 126 - .mem.offset 0,0; st8.spill [r16]=r13,16; \ 127 - .mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \ 128 - mov r13=IA64_KR(CURRENT); /* establish `current' */ \ 129 - ;; \ 130 - .mem.offset 0,0; st8.spill [r16]=r15,16; \ 131 - .mem.offset 8,0; 
st8.spill [r17]=r14,16; \ 132 - ;; \ 133 - .mem.offset 0,0; st8.spill [r16]=r2,16; \ 134 - .mem.offset 8,0; st8.spill [r17]=r3,16; \ 135 - XEN_ACCOUNT_GET_STAMP \ 136 - adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ 137 - ;; \ 138 - EXTRA; \ 139 - movl r1=__gp; /* establish kernel global pointer */ \ 140 - ;; \ 141 - ACCOUNT_SYS_ENTER \ 142 - BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \ 143 - ;;
-38
arch/ia64/include/asm/xen/page-coherent.h
··· 1 - #ifndef _ASM_IA64_XEN_PAGE_COHERENT_H 2 - #define _ASM_IA64_XEN_PAGE_COHERENT_H 3 - 4 - #include <asm/page.h> 5 - #include <linux/dma-attrs.h> 6 - #include <linux/dma-mapping.h> 7 - 8 - static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 9 - dma_addr_t *dma_handle, gfp_t flags, 10 - struct dma_attrs *attrs) 11 - { 12 - void *vstart = (void*)__get_free_pages(flags, get_order(size)); 13 - *dma_handle = virt_to_phys(vstart); 14 - return vstart; 15 - } 16 - 17 - static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 18 - void *cpu_addr, dma_addr_t dma_handle, 19 - struct dma_attrs *attrs) 20 - { 21 - free_pages((unsigned long) cpu_addr, get_order(size)); 22 - } 23 - 24 - static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 25 - unsigned long offset, size_t size, enum dma_data_direction dir, 26 - struct dma_attrs *attrs) { } 27 - 28 - static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 29 - size_t size, enum dma_data_direction dir, 30 - struct dma_attrs *attrs) { } 31 - 32 - static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 33 - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } 34 - 35 - static inline void xen_dma_sync_single_for_device(struct device *hwdev, 36 - dma_addr_t handle, size_t size, enum dma_data_direction dir) { } 37 - 38 - #endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
-65
arch/ia64/include/asm/xen/page.h
··· 1 - /****************************************************************************** 2 - * arch/ia64/include/asm/xen/page.h 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #ifndef _ASM_IA64_XEN_PAGE_H 24 - #define _ASM_IA64_XEN_PAGE_H 25 - 26 - #define INVALID_P2M_ENTRY (~0UL) 27 - 28 - static inline unsigned long mfn_to_pfn(unsigned long mfn) 29 - { 30 - return mfn; 31 - } 32 - 33 - static inline unsigned long pfn_to_mfn(unsigned long pfn) 34 - { 35 - return pfn; 36 - } 37 - 38 - #define phys_to_machine_mapping_valid(_x) (1) 39 - 40 - static inline void *mfn_to_virt(unsigned long mfn) 41 - { 42 - return __va(mfn << PAGE_SHIFT); 43 - } 44 - 45 - static inline unsigned long virt_to_mfn(void *virt) 46 - { 47 - return __pa(virt) >> PAGE_SHIFT; 48 - } 49 - 50 - /* for tpmfront.c */ 51 - static inline unsigned long virt_to_machine(void *virt) 52 - { 53 - return __pa(virt); 54 - } 55 - 56 - static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) 57 - { 58 - /* nothing */ 59 - } 60 - 61 - #define pte_mfn(_x) pte_pfn(_x) 62 - #define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */ 63 - #define __pte_ma(_x) ((pte_t) 
{(_x)}) /* unmodified use */ 64 - 65 - #endif /* _ASM_IA64_XEN_PAGE_H */
-38
arch/ia64/include/asm/xen/patchlist.h
··· 1 - /****************************************************************************** 2 - * arch/ia64/include/asm/xen/patchlist.h 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #define __paravirt_start_gate_fsyscall_patchlist \ 24 - __xen_start_gate_fsyscall_patchlist 25 - #define __paravirt_end_gate_fsyscall_patchlist \ 26 - __xen_end_gate_fsyscall_patchlist 27 - #define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ 28 - __xen_start_gate_brl_fsys_bubble_down_patchlist 29 - #define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ 30 - __xen_end_gate_brl_fsys_bubble_down_patchlist 31 - #define __paravirt_start_gate_vtop_patchlist \ 32 - __xen_start_gate_vtop_patchlist 33 - #define __paravirt_end_gate_vtop_patchlist \ 34 - __xen_end_gate_vtop_patchlist 35 - #define __paravirt_start_gate_mckinley_e9_patchlist \ 36 - __xen_start_gate_mckinley_e9_patchlist 37 - #define __paravirt_end_gate_mckinley_e9_patchlist \ 38 - __xen_end_gate_mckinley_e9_patchlist
-135
arch/ia64/include/asm/xen/privop.h
··· 1 - #ifndef _ASM_IA64_XEN_PRIVOP_H 2 - #define _ASM_IA64_XEN_PRIVOP_H 3 - 4 - /* 5 - * Copyright (C) 2005 Hewlett-Packard Co 6 - * Dan Magenheimer <dan.magenheimer@hp.com> 7 - * 8 - * Paravirtualizations of privileged operations for Xen/ia64 9 - * 10 - * 11 - * inline privop and paravirt_alt support 12 - * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> 13 - * VA Linux Systems Japan K.K. 14 - * 15 - */ 16 - 17 - #ifndef __ASSEMBLY__ 18 - #include <linux/types.h> /* arch-ia64.h requires uint64_t */ 19 - #endif 20 - #include <asm/xen/interface.h> 21 - 22 - /* At 1 MB, before per-cpu space but still addressable using addl instead 23 - of movl. */ 24 - #define XSI_BASE 0xfffffffffff00000 25 - 26 - /* Address of mapped regs. */ 27 - #define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE) 28 - 29 - #ifdef __ASSEMBLY__ 30 - #define XEN_HYPER_RFI break HYPERPRIVOP_RFI 31 - #define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT 32 - #define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT 33 - #define XEN_HYPER_COVER break HYPERPRIVOP_COVER 34 - #define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D 35 - #define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I 36 - #define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I 37 - #define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR 38 - #define XEN_HYPER_THASH break HYPERPRIVOP_THASH 39 - #define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D 40 - #define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR 41 - #define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR 42 - #define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4 43 - 44 - #define XSI_IFS (XSI_BASE + XSI_IFS_OFS) 45 - #define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS) 46 - #define XSI_IFA (XSI_BASE + XSI_IFA_OFS) 47 - #define XSI_ISR (XSI_BASE + XSI_ISR_OFS) 48 - #define XSI_IIM (XSI_BASE + XSI_IIM_OFS) 49 - #define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS) 50 - #define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) 51 - #define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS) 52 - #define XSI_IPSR 
(XSI_BASE + XSI_IPSR_OFS) 53 - #define XSI_IIP (XSI_BASE + XSI_IIP_OFS) 54 - #define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS) 55 - #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) 56 - #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) 57 - #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) 58 - #define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) 59 - #define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) 60 - #endif 61 - 62 - #ifndef __ASSEMBLY__ 63 - 64 - /************************************************/ 65 - /* Instructions paravirtualized for correctness */ 66 - /************************************************/ 67 - 68 - /* "fc" and "thash" are privilege-sensitive instructions, meaning they 69 - * may have different semantics depending on whether they are executed 70 - * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't 71 - * be allowed to execute directly, lest incorrect semantics result. */ 72 - extern void xen_fc(void *addr); 73 - extern unsigned long xen_thash(unsigned long addr); 74 - 75 - /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" 76 - * is not currently used (though it may be in a long-format VHPT system!) 77 - * and the semantics of cover only change if psr.ic is off which is very 78 - * rare (and currently non-existent outside of assembly code */ 79 - 80 - /* There are also privilege-sensitive registers. These registers are 81 - * readable at any privilege level but only writable at PL0. 
*/ 82 - extern unsigned long xen_get_cpuid(int index); 83 - extern unsigned long xen_get_pmd(int index); 84 - 85 - #ifndef ASM_SUPPORTED 86 - extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ 87 - extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ 88 - #endif 89 - 90 - /************************************************/ 91 - /* Instructions paravirtualized for performance */ 92 - /************************************************/ 93 - 94 - /* Xen uses memory-mapped virtual privileged registers for access to many 95 - * performance-sensitive privileged registers. Some, like the processor 96 - * status register (psr), are broken up into multiple memory locations. 97 - * Others, like "pend", are abstractions based on privileged registers. 98 - * "Pend" is guaranteed to be set if reading cr.ivr would return a 99 - * (non-spurious) interrupt. */ 100 - #define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE) 101 - 102 - #define XSI_PSR_I \ 103 - (*XEN_MAPPEDREGS->interrupt_mask_addr) 104 - #define xen_get_virtual_psr_i() \ 105 - (!XSI_PSR_I) 106 - #define xen_set_virtual_psr_i(_val) \ 107 - ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; }) 108 - #define xen_set_virtual_psr_ic(_val) \ 109 - ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; }) 110 - #define xen_get_virtual_pend() \ 111 - (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) 112 - 113 - #ifndef ASM_SUPPORTED 114 - /* Although all privileged operations can be left to trap and will 115 - * be properly handled by Xen, some are frequent enough that we use 116 - * hyperprivops for performance. 
*/ 117 - extern unsigned long xen_get_psr(void); 118 - extern unsigned long xen_get_ivr(void); 119 - extern unsigned long xen_get_tpr(void); 120 - extern void xen_hyper_ssm_i(void); 121 - extern void xen_set_itm(unsigned long); 122 - extern void xen_set_tpr(unsigned long); 123 - extern void xen_eoi(unsigned long); 124 - extern unsigned long xen_get_rr(unsigned long index); 125 - extern void xen_set_rr(unsigned long index, unsigned long val); 126 - extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, 127 - unsigned long val2, unsigned long val3, 128 - unsigned long val4); 129 - extern void xen_set_kr(unsigned long index, unsigned long val); 130 - extern void xen_ptcga(unsigned long addr, unsigned long size); 131 - #endif /* !ASM_SUPPORTED */ 132 - 133 - #endif /* !__ASSEMBLY__ */ 134 - 135 - #endif /* _ASM_IA64_XEN_PRIVOP_H */
-51
arch/ia64/include/asm/xen/xcom_hcall.h
··· 1 - /* 2 - * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation; either version 2 of the License, or 7 - * (at your option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write to the Free Software 16 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 - */ 18 - 19 - #ifndef _ASM_IA64_XEN_XCOM_HCALL_H 20 - #define _ASM_IA64_XEN_XCOM_HCALL_H 21 - 22 - /* These function creates inline or mini descriptor for the parameters and 23 - calls the corresponding xencomm_arch_hypercall_X. 24 - Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless 25 - they want to use their own wrapper. 
*/ 26 - extern int xencomm_hypercall_console_io(int cmd, int count, char *str); 27 - 28 - extern int xencomm_hypercall_event_channel_op(int cmd, void *op); 29 - 30 - extern int xencomm_hypercall_xen_version(int cmd, void *arg); 31 - 32 - extern int xencomm_hypercall_physdev_op(int cmd, void *op); 33 - 34 - extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, 35 - unsigned int count); 36 - 37 - extern int xencomm_hypercall_sched_op(int cmd, void *arg); 38 - 39 - extern int xencomm_hypercall_multicall(void *call_list, int nr_calls); 40 - 41 - extern int xencomm_hypercall_callback_op(int cmd, void *arg); 42 - 43 - extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg); 44 - 45 - extern int xencomm_hypercall_suspend(unsigned long srec); 46 - 47 - extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg); 48 - 49 - extern long xencomm_hypercall_opt_feature(void *arg); 50 - 51 - #endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
-42
arch/ia64/include/asm/xen/xencomm.h
··· 1 - /* 2 - * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation; either version 2 of the License, or 7 - * (at your option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write to the Free Software 16 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 - */ 18 - 19 - #ifndef _ASM_IA64_XEN_XENCOMM_H 20 - #define _ASM_IA64_XEN_XENCOMM_H 21 - 22 - #include <xen/xencomm.h> 23 - #include <asm/pgtable.h> 24 - 25 - /* Must be called before any hypercall. */ 26 - extern void xencomm_initialize(void); 27 - extern int xencomm_is_initialized(void); 28 - 29 - /* Check if virtual contiguity means physical contiguity 30 - * where the passed address is a pointer value in virtual address. 31 - * On ia64, identity mapping area in region 7 or the piece of region 5 32 - * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL] 33 - */ 34 - static inline int xencomm_is_phys_contiguous(unsigned long addr) 35 - { 36 - return (PAGE_OFFSET <= addr && 37 - addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || 38 - (KERNEL_START <= addr && 39 - addr < KERNEL_START + KERNEL_TR_PAGE_SIZE); 40 - } 41 - 42 - #endif /* _ASM_IA64_XEN_XENCOMM_H */
-9
arch/ia64/include/uapi/asm/break.h
··· 20 20 */ 21 21 #define __IA64_BREAK_SYSCALL 0x100000 22 22 23 - /* 24 - * Xen specific break numbers: 25 - */ 26 - #define __IA64_XEN_HYPERCALL 0x1000 27 - /* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used 28 - for xen hyperprivops */ 29 - #define __IA64_XEN_HYPERPRIVOP_START 0x1 30 - #define __IA64_XEN_HYPERPRIVOP_MAX 0x1a 31 - 32 23 #endif /* _ASM_IA64_BREAK_H */
-3
arch/ia64/kernel/acpi.c
··· 53 53 #include <asm/numa.h> 54 54 #include <asm/sal.h> 55 55 #include <asm/cyclone.h> 56 - #include <asm/xen/hypervisor.h> 57 56 58 57 #define BAD_MADT_ENTRY(entry, end) ( \ 59 58 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ··· 119 120 return "uv"; 120 121 else 121 122 return "sn2"; 122 - } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) { 123 - return "xen"; 124 123 } 125 124 126 125 #ifdef CONFIG_INTEL_IOMMU
-32
arch/ia64/kernel/asm-offsets.c
··· 16 16 #include <asm/sigcontext.h> 17 17 #include <asm/mca.h> 18 18 19 - #include <asm/xen/interface.h> 20 - #include <asm/xen/hypervisor.h> 21 - 22 19 #include "../kernel/sigframe.h" 23 20 #include "../kernel/fsyscall_gtod_data.h" 24 21 ··· 287 290 DEFINE(IA64_ITC_LASTCYCLE_OFFSET, 288 291 offsetof (struct itc_jitter_data_t, itc_lastcycle)); 289 292 290 - #ifdef CONFIG_XEN 291 - BLANK(); 292 - 293 - DEFINE(XEN_NATIVE_ASM, XEN_NATIVE); 294 - DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN); 295 - 296 - #define DEFINE_MAPPED_REG_OFS(sym, field) \ 297 - DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field))) 298 - 299 - DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr); 300 - DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr); 301 - DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip); 302 - DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs); 303 - DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs); 304 - DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr); 305 - DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa); 306 - DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa); 307 - DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim); 308 - DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha); 309 - DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir); 310 - DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled); 311 - DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum); 312 - DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]); 313 - DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); 314 - DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); 315 - DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); 316 - DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); 317 - DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); 318 - #endif /* CONFIG_XEN */ 319 293 }
-3
arch/ia64/kernel/head.S
··· 416 416 417 417 default_setup_hook = 0 // Currently nothing needs to be done. 418 418 419 - .weak xen_setup_hook 420 - 421 419 .global hypervisor_type 422 420 hypervisor_type: 423 421 data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT ··· 424 426 425 427 hypervisor_setup_hooks: 426 428 data8 default_setup_hook 427 - data8 xen_setup_hook 428 429 num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8 429 430 .previous 430 431
-4
arch/ia64/kernel/nr-irqs.c
··· 10 10 #include <linux/kbuild.h> 11 11 #include <linux/threads.h> 12 12 #include <asm/native/irq.h> 13 - #include <asm/xen/irq.h> 14 13 15 14 void foo(void) 16 15 { 17 16 union paravirt_nr_irqs_max { 18 17 char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; 19 - #ifdef CONFIG_XEN 20 - char xen_nr_irqs[XEN_NR_IRQS]; 21 - #endif 22 18 }; 23 19 24 20 DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
-3
arch/ia64/kernel/paravirt_inst.h
··· 22 22 23 23 #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK 24 24 #include <asm/native/pvchk_inst.h> 25 - #elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN) 26 - #include <asm/xen/inst.h> 27 - #include <asm/xen/minstate.h> 28 25 #else 29 26 #include <asm/native/inst.h> 30 27 #endif
-4
arch/ia64/kernel/paravirt_patchlist.h
··· 20 20 * 21 21 */ 22 22 23 - #if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) 24 - #include <asm/xen/patchlist.h> 25 - #else 26 23 #include <asm/native/patchlist.h> 27 - #endif 28 24
-6
arch/ia64/kernel/vmlinux.lds.S
··· 182 182 __start_gate_section = .; 183 183 *(.data..gate) 184 184 __stop_gate_section = .; 185 - #ifdef CONFIG_XEN 186 - . = ALIGN(PAGE_SIZE); 187 - __xen_start_gate_section = .; 188 - *(.data..gate.xen) 189 - __xen_stop_gate_section = .; 190 - #endif 191 185 } 192 186 /* 193 187 * make sure the gate page doesn't expose
-25
arch/ia64/xen/Kconfig
··· 1 - # 2 - # This Kconfig describes xen/ia64 options 3 - # 4 - 5 - config XEN 6 - bool "Xen hypervisor support" 7 - default y 8 - depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB 9 - select XEN_XENCOMM 10 - select NO_IDLE_HZ 11 - # followings are required to save/restore. 12 - select ARCH_SUSPEND_POSSIBLE 13 - select SUSPEND 14 - select PM_SLEEP 15 - help 16 - Enable Xen hypervisor support. Resulting kernel runs 17 - both as a guest OS on Xen and natively on hardware. 18 - 19 - config XEN_XENCOMM 20 - depends on XEN 21 - bool 22 - 23 - config NO_IDLE_HZ 24 - depends on XEN 25 - bool
-37
arch/ia64/xen/Makefile
··· 1 - # 2 - # Makefile for Xen components 3 - # 4 - 5 - obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ 6 - hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ 7 - gate-data.o 8 - 9 - obj-$(CONFIG_IA64_GENERIC) += machvec.o 10 - 11 - # The gate DSO image is built using a special linker script. 12 - include $(srctree)/arch/ia64/kernel/Makefile.gate 13 - 14 - # tell compiled for xen 15 - CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN 16 - AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN 17 - 18 - # use same file of native. 19 - $(obj)/gate.o: $(src)/../kernel/gate.S FORCE 20 - $(call if_changed_dep,as_o_S) 21 - $(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE 22 - $(call if_changed_dep,cpp_lds_S) 23 - 24 - 25 - AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN 26 - 27 - # xen multi compile 28 - ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S 29 - ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) 30 - obj-y += $(ASM_PARAVIRT_OBJS) 31 - define paravirtualized_xen 32 - AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN 33 - endef 34 - $(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o)))) 35 - 36 - $(obj)/xen-%.o: $(src)/../kernel/%.S FORCE 37 - $(call if_changed_dep,as_o_S)
-3
arch/ia64/xen/gate-data.S
··· 1 - .section .data..gate.xen, "aw" 2 - 3 - .incbin "arch/ia64/xen/gate.so"
-94
arch/ia64/xen/grant-table.c
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/grant-table.c 3 - * 4 - * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #include <linux/module.h> 24 - #include <linux/vmalloc.h> 25 - #include <linux/slab.h> 26 - #include <linux/mm.h> 27 - 28 - #include <xen/interface/xen.h> 29 - #include <xen/interface/memory.h> 30 - #include <xen/grant_table.h> 31 - 32 - #include <asm/xen/hypervisor.h> 33 - 34 - /**************************************************************************** 35 - * grant table hack 36 - * cmd: GNTTABOP_xxx 37 - */ 38 - 39 - int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, 40 - unsigned long max_nr_gframes, 41 - struct grant_entry **__shared) 42 - { 43 - *__shared = __va(frames[0] << PAGE_SHIFT); 44 - return 0; 45 - } 46 - 47 - void arch_gnttab_unmap_shared(struct grant_entry *shared, 48 - unsigned long nr_gframes) 49 - { 50 - /* nothing */ 51 - } 52 - 53 - static void 54 - gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) 55 - { 56 - uint32_t flags; 57 - 58 - flags = uop->flags; 59 - 60 - if (flags & GNTMAP_host_map) { 61 - if 
(flags & GNTMAP_application_map) { 62 - printk(KERN_DEBUG 63 - "GNTMAP_application_map is not supported yet: " 64 - "flags 0x%x\n", flags); 65 - BUG(); 66 - } 67 - if (flags & GNTMAP_contains_pte) { 68 - printk(KERN_DEBUG 69 - "GNTMAP_contains_pte is not supported yet: " 70 - "flags 0x%x\n", flags); 71 - BUG(); 72 - } 73 - } else if (flags & GNTMAP_device_map) { 74 - printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); 75 - BUG(); /* not yet. actually this flag is not used. */ 76 - } else { 77 - BUG(); 78 - } 79 - } 80 - 81 - int 82 - HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) 83 - { 84 - if (cmd == GNTTABOP_map_grant_ref) { 85 - unsigned int i; 86 - for (i = 0; i < count; i++) { 87 - gnttab_map_grant_ref_pre( 88 - (struct gnttab_map_grant_ref *)uop + i); 89 - } 90 - } 91 - return xencomm_hypercall_grant_table_op(cmd, uop, count); 92 - } 93 - 94 - EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
-88
arch/ia64/xen/hypercall.S
··· 1 - /* 2 - * Support routines for Xen hypercalls 3 - * 4 - * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> 5 - * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com> 6 - */ 7 - 8 - #include <asm/asmmacro.h> 9 - #include <asm/intrinsics.h> 10 - #include <asm/xen/privop.h> 11 - 12 - #ifdef __INTEL_COMPILER 13 - /* 14 - * Hypercalls without parameter. 15 - */ 16 - #define __HCALL0(name,hcall) \ 17 - GLOBAL_ENTRY(name); \ 18 - break hcall; \ 19 - br.ret.sptk.many rp; \ 20 - END(name) 21 - 22 - /* 23 - * Hypercalls with 1 parameter. 24 - */ 25 - #define __HCALL1(name,hcall) \ 26 - GLOBAL_ENTRY(name); \ 27 - mov r8=r32; \ 28 - break hcall; \ 29 - br.ret.sptk.many rp; \ 30 - END(name) 31 - 32 - /* 33 - * Hypercalls with 2 parameters. 34 - */ 35 - #define __HCALL2(name,hcall) \ 36 - GLOBAL_ENTRY(name); \ 37 - mov r8=r32; \ 38 - mov r9=r33; \ 39 - break hcall; \ 40 - br.ret.sptk.many rp; \ 41 - END(name) 42 - 43 - __HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR) 44 - __HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR) 45 - __HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR) 46 - __HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I) 47 - 48 - __HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR) 49 - __HCALL1(xen_eoi, HYPERPRIVOP_EOI) 50 - __HCALL1(xen_thash, HYPERPRIVOP_THASH) 51 - __HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM) 52 - __HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR) 53 - __HCALL1(xen_fc, HYPERPRIVOP_FC) 54 - __HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID) 55 - __HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD) 56 - 57 - __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) 58 - __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) 59 - __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) 60 - 61 - GLOBAL_ENTRY(xen_set_rr0_to_rr4) 62 - mov r8=r32 63 - mov r9=r33 64 - mov r10=r34 65 - mov r11=r35 66 - mov r14=r36 67 - XEN_HYPER_SET_RR0_TO_RR4 68 - br.ret.sptk.many rp 69 - ;; 70 - END(xen_set_rr0_to_rr4) 71 - #endif 72 - 73 - GLOBAL_ENTRY(xen_send_ipi) 74 - mov r14=r32 75 - mov r15=r33 76 - mov r2=0x400 77 - break 0x1000 78 - ;; 79 - 
br.ret.sptk.many rp 80 - ;; 81 - END(xen_send_ipi) 82 - 83 - GLOBAL_ENTRY(__hypercall) 84 - mov r2=r37 85 - break 0x1000 86 - br.ret.sptk.many b0 87 - ;; 88 - END(__hypercall)
-97
arch/ia64/xen/hypervisor.c
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/hypervisor.c 3 - * 4 - * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #include <linux/efi.h> 24 - #include <linux/export.h> 25 - #include <asm/xen/hypervisor.h> 26 - #include <asm/xen/privop.h> 27 - 28 - #include "irq_xen.h" 29 - 30 - struct shared_info *HYPERVISOR_shared_info __read_mostly = 31 - (struct shared_info *)XSI_BASE; 32 - EXPORT_SYMBOL(HYPERVISOR_shared_info); 33 - 34 - DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 35 - 36 - struct start_info *xen_start_info; 37 - EXPORT_SYMBOL(xen_start_info); 38 - 39 - EXPORT_SYMBOL(xen_domain_type); 40 - 41 - EXPORT_SYMBOL(__hypercall); 42 - 43 - /* Stolen from arch/x86/xen/enlighten.c */ 44 - /* 45 - * Flag to determine whether vcpu info placement is available on all 46 - * VCPUs. We assume it is to start with, and then set it to zero on 47 - * the first failure. 
This is because it can succeed on some VCPUs 48 - * and not others, since it can involve hypervisor memory allocation, 49 - * or because the guest failed to guarantee all the appropriate 50 - * constraints on all VCPUs (ie buffer can't cross a page boundary). 51 - * 52 - * Note that any particular CPU may be using a placed vcpu structure, 53 - * but we can only optimise if the all are. 54 - * 55 - * 0: not available, 1: available 56 - */ 57 - 58 - static void __init xen_vcpu_setup(int cpu) 59 - { 60 - /* 61 - * WARNING: 62 - * before changing MAX_VIRT_CPUS, 63 - * check that shared_info fits on a page 64 - */ 65 - BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE); 66 - per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 67 - } 68 - 69 - void __init xen_setup_vcpu_info_placement(void) 70 - { 71 - int cpu; 72 - 73 - for_each_possible_cpu(cpu) 74 - xen_vcpu_setup(cpu); 75 - } 76 - 77 - void 78 - xen_cpu_init(void) 79 - { 80 - xen_smp_intr_init(); 81 - } 82 - 83 - /************************************************************************** 84 - * opt feature 85 - */ 86 - void 87 - xen_ia64_enable_opt_feature(void) 88 - { 89 - /* Enable region 7 identity map optimizations in Xen */ 90 - struct xen_ia64_opt_feature optf; 91 - 92 - optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7; 93 - optf.on = XEN_IA64_OPTF_ON; 94 - optf.pgprot = pgprot_val(PAGE_KERNEL); 95 - optf.key = 0; /* No key on linux. */ 96 - HYPERVISOR_opt_feature(&optf); 97 - }
-443
arch/ia64/xen/irq_xen.c
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/irq_xen.c 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #include <linux/cpu.h> 24 - 25 - #include <xen/interface/xen.h> 26 - #include <xen/interface/callback.h> 27 - #include <xen/events.h> 28 - 29 - #include <asm/xen/privop.h> 30 - 31 - #include "irq_xen.h" 32 - 33 - /*************************************************************************** 34 - * pv_irq_ops 35 - * irq operations 36 - */ 37 - 38 - static int 39 - xen_assign_irq_vector(int irq) 40 - { 41 - struct physdev_irq irq_op; 42 - 43 - irq_op.irq = irq; 44 - if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) 45 - return -ENOSPC; 46 - 47 - return irq_op.vector; 48 - } 49 - 50 - static void 51 - xen_free_irq_vector(int vector) 52 - { 53 - struct physdev_irq irq_op; 54 - 55 - if (vector < IA64_FIRST_DEVICE_VECTOR || 56 - vector > IA64_LAST_DEVICE_VECTOR) 57 - return; 58 - 59 - irq_op.vector = vector; 60 - if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) 61 - printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n", 62 - __func__, 
vector); 63 - } 64 - 65 - 66 - static DEFINE_PER_CPU(int, xen_timer_irq) = -1; 67 - static DEFINE_PER_CPU(int, xen_ipi_irq) = -1; 68 - static DEFINE_PER_CPU(int, xen_resched_irq) = -1; 69 - static DEFINE_PER_CPU(int, xen_cmc_irq) = -1; 70 - static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1; 71 - static DEFINE_PER_CPU(int, xen_cpep_irq) = -1; 72 - #define NAME_SIZE 15 73 - static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name); 74 - static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name); 75 - static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name); 76 - static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name); 77 - static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name); 78 - static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name); 79 - #undef NAME_SIZE 80 - 81 - struct saved_irq { 82 - unsigned int irq; 83 - struct irqaction *action; 84 - }; 85 - /* 16 should be far optimistic value, since only several percpu irqs 86 - * are registered early. 87 - */ 88 - #define MAX_LATE_IRQ 16 89 - static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ]; 90 - static unsigned short late_irq_cnt; 91 - static unsigned short saved_irq_cnt; 92 - static int xen_slab_ready; 93 - 94 - #ifdef CONFIG_SMP 95 - #include <linux/sched.h> 96 - 97 - /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, 98 - * it ends up to issue several memory accesses upon percpu data and 99 - * thus adds unnecessary traffic to other paths. 
100 - */ 101 - static irqreturn_t 102 - xen_dummy_handler(int irq, void *dev_id) 103 - { 104 - return IRQ_HANDLED; 105 - } 106 - 107 - static irqreturn_t 108 - xen_resched_handler(int irq, void *dev_id) 109 - { 110 - scheduler_ipi(); 111 - return IRQ_HANDLED; 112 - } 113 - 114 - static struct irqaction xen_ipi_irqaction = { 115 - .handler = handle_IPI, 116 - .flags = IRQF_DISABLED, 117 - .name = "IPI" 118 - }; 119 - 120 - static struct irqaction xen_resched_irqaction = { 121 - .handler = xen_resched_handler, 122 - .flags = IRQF_DISABLED, 123 - .name = "resched" 124 - }; 125 - 126 - static struct irqaction xen_tlb_irqaction = { 127 - .handler = xen_dummy_handler, 128 - .flags = IRQF_DISABLED, 129 - .name = "tlb_flush" 130 - }; 131 - #endif 132 - 133 - /* 134 - * This is xen version percpu irq registration, which needs bind 135 - * to xen specific evtchn sub-system. One trick here is that xen 136 - * evtchn binding interface depends on kmalloc because related 137 - * port needs to be freed at device/cpu down. So we cache the 138 - * registration on BSP before slab is ready and then deal them 139 - * at later point. For rest instances happening after slab ready, 140 - * we hook them to xen evtchn immediately. 141 - * 142 - * FIXME: MCA is not supported by far, and thus "nomca" boot param is 143 - * required. 
144 - */ 145 - static void 146 - __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, 147 - struct irqaction *action, int save) 148 - { 149 - int irq = 0; 150 - 151 - if (xen_slab_ready) { 152 - switch (vec) { 153 - case IA64_TIMER_VECTOR: 154 - snprintf(per_cpu(xen_timer_name, cpu), 155 - sizeof(per_cpu(xen_timer_name, cpu)), 156 - "%s%d", action->name, cpu); 157 - irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, 158 - action->handler, action->flags, 159 - per_cpu(xen_timer_name, cpu), action->dev_id); 160 - per_cpu(xen_timer_irq, cpu) = irq; 161 - break; 162 - case IA64_IPI_RESCHEDULE: 163 - snprintf(per_cpu(xen_resched_name, cpu), 164 - sizeof(per_cpu(xen_resched_name, cpu)), 165 - "%s%d", action->name, cpu); 166 - irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, 167 - action->handler, action->flags, 168 - per_cpu(xen_resched_name, cpu), action->dev_id); 169 - per_cpu(xen_resched_irq, cpu) = irq; 170 - break; 171 - case IA64_IPI_VECTOR: 172 - snprintf(per_cpu(xen_ipi_name, cpu), 173 - sizeof(per_cpu(xen_ipi_name, cpu)), 174 - "%s%d", action->name, cpu); 175 - irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, 176 - action->handler, action->flags, 177 - per_cpu(xen_ipi_name, cpu), action->dev_id); 178 - per_cpu(xen_ipi_irq, cpu) = irq; 179 - break; 180 - case IA64_CMC_VECTOR: 181 - snprintf(per_cpu(xen_cmc_name, cpu), 182 - sizeof(per_cpu(xen_cmc_name, cpu)), 183 - "%s%d", action->name, cpu); 184 - irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, 185 - action->handler, 186 - action->flags, 187 - per_cpu(xen_cmc_name, cpu), 188 - action->dev_id); 189 - per_cpu(xen_cmc_irq, cpu) = irq; 190 - break; 191 - case IA64_CMCP_VECTOR: 192 - snprintf(per_cpu(xen_cmcp_name, cpu), 193 - sizeof(per_cpu(xen_cmcp_name, cpu)), 194 - "%s%d", action->name, cpu); 195 - irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, 196 - action->handler, 197 - action->flags, 198 - per_cpu(xen_cmcp_name, cpu), 199 - action->dev_id); 200 - per_cpu(xen_cmcp_irq, cpu) = irq; 201 - break; 
202 - case IA64_CPEP_VECTOR: 203 - snprintf(per_cpu(xen_cpep_name, cpu), 204 - sizeof(per_cpu(xen_cpep_name, cpu)), 205 - "%s%d", action->name, cpu); 206 - irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, 207 - action->handler, 208 - action->flags, 209 - per_cpu(xen_cpep_name, cpu), 210 - action->dev_id); 211 - per_cpu(xen_cpep_irq, cpu) = irq; 212 - break; 213 - case IA64_CPE_VECTOR: 214 - case IA64_MCA_RENDEZ_VECTOR: 215 - case IA64_PERFMON_VECTOR: 216 - case IA64_MCA_WAKEUP_VECTOR: 217 - case IA64_SPURIOUS_INT_VECTOR: 218 - /* No need to complain, these aren't supported. */ 219 - break; 220 - default: 221 - printk(KERN_WARNING "Percpu irq %d is unsupported " 222 - "by xen!\n", vec); 223 - break; 224 - } 225 - BUG_ON(irq < 0); 226 - 227 - if (irq > 0) { 228 - /* 229 - * Mark percpu. Without this, migrate_irqs() will 230 - * mark the interrupt for migrations and trigger it 231 - * on cpu hotplug. 232 - */ 233 - irq_set_status_flags(irq, IRQ_PER_CPU); 234 - } 235 - } 236 - 237 - /* For BSP, we cache registered percpu irqs, and then re-walk 238 - * them when initializing APs 239 - */ 240 - if (!cpu && save) { 241 - BUG_ON(saved_irq_cnt == MAX_LATE_IRQ); 242 - saved_percpu_irqs[saved_irq_cnt].irq = vec; 243 - saved_percpu_irqs[saved_irq_cnt].action = action; 244 - saved_irq_cnt++; 245 - if (!xen_slab_ready) 246 - late_irq_cnt++; 247 - } 248 - } 249 - 250 - static void 251 - xen_register_percpu_irq(ia64_vector vec, struct irqaction *action) 252 - { 253 - __xen_register_percpu_irq(smp_processor_id(), vec, action, 1); 254 - } 255 - 256 - static void 257 - xen_bind_early_percpu_irq(void) 258 - { 259 - int i; 260 - 261 - xen_slab_ready = 1; 262 - /* There's no race when accessing this cached array, since only 263 - * BSP will face with such step shortly 264 - */ 265 - for (i = 0; i < late_irq_cnt; i++) 266 - __xen_register_percpu_irq(smp_processor_id(), 267 - saved_percpu_irqs[i].irq, 268 - saved_percpu_irqs[i].action, 0); 269 - } 270 - 271 - /* FIXME: There's no 
obvious point to check whether slab is ready. So 272 - * a hack is used here by utilizing a late time hook. 273 - */ 274 - 275 - #ifdef CONFIG_HOTPLUG_CPU 276 - static int unbind_evtchn_callback(struct notifier_block *nfb, 277 - unsigned long action, void *hcpu) 278 - { 279 - unsigned int cpu = (unsigned long)hcpu; 280 - 281 - if (action == CPU_DEAD) { 282 - /* Unregister evtchn. */ 283 - if (per_cpu(xen_cpep_irq, cpu) >= 0) { 284 - unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), 285 - NULL); 286 - per_cpu(xen_cpep_irq, cpu) = -1; 287 - } 288 - if (per_cpu(xen_cmcp_irq, cpu) >= 0) { 289 - unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), 290 - NULL); 291 - per_cpu(xen_cmcp_irq, cpu) = -1; 292 - } 293 - if (per_cpu(xen_cmc_irq, cpu) >= 0) { 294 - unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); 295 - per_cpu(xen_cmc_irq, cpu) = -1; 296 - } 297 - if (per_cpu(xen_ipi_irq, cpu) >= 0) { 298 - unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); 299 - per_cpu(xen_ipi_irq, cpu) = -1; 300 - } 301 - if (per_cpu(xen_resched_irq, cpu) >= 0) { 302 - unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), 303 - NULL); 304 - per_cpu(xen_resched_irq, cpu) = -1; 305 - } 306 - if (per_cpu(xen_timer_irq, cpu) >= 0) { 307 - unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), 308 - NULL); 309 - per_cpu(xen_timer_irq, cpu) = -1; 310 - } 311 - } 312 - return NOTIFY_OK; 313 - } 314 - 315 - static struct notifier_block unbind_evtchn_notifier = { 316 - .notifier_call = unbind_evtchn_callback, 317 - .priority = 0 318 - }; 319 - #endif 320 - 321 - void xen_smp_intr_init_early(unsigned int cpu) 322 - { 323 - #ifdef CONFIG_SMP 324 - unsigned int i; 325 - 326 - for (i = 0; i < saved_irq_cnt; i++) 327 - __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, 328 - saved_percpu_irqs[i].action, 0); 329 - #endif 330 - } 331 - 332 - void xen_smp_intr_init(void) 333 - { 334 - #ifdef CONFIG_SMP 335 - unsigned int cpu = smp_processor_id(); 336 - struct callback_register event = { 337 - 
.type = CALLBACKTYPE_event, 338 - .address = { .ip = (unsigned long)&xen_event_callback }, 339 - }; 340 - 341 - if (cpu == 0) { 342 - /* Initialization was already done for boot cpu. */ 343 - #ifdef CONFIG_HOTPLUG_CPU 344 - /* Register the notifier only once. */ 345 - register_cpu_notifier(&unbind_evtchn_notifier); 346 - #endif 347 - return; 348 - } 349 - 350 - /* This should be piggyback when setup vcpu guest context */ 351 - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); 352 - #endif /* CONFIG_SMP */ 353 - } 354 - 355 - void __init 356 - xen_irq_init(void) 357 - { 358 - struct callback_register event = { 359 - .type = CALLBACKTYPE_event, 360 - .address = { .ip = (unsigned long)&xen_event_callback }, 361 - }; 362 - 363 - xen_init_IRQ(); 364 - BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); 365 - late_time_init = xen_bind_early_percpu_irq; 366 - } 367 - 368 - void 369 - xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) 370 - { 371 - #ifdef CONFIG_SMP 372 - /* TODO: we need to call vcpu_up here */ 373 - if (unlikely(vector == ap_wakeup_vector)) { 374 - /* XXX 375 - * This should be in __cpu_up(cpu) in ia64 smpboot.c 376 - * like x86. But don't want to modify it, 377 - * keep it untouched. 
378 - */ 379 - xen_smp_intr_init_early(cpu); 380 - 381 - xen_send_ipi(cpu, vector); 382 - /* vcpu_prepare_and_up(cpu); */ 383 - return; 384 - } 385 - #endif 386 - 387 - switch (vector) { 388 - case IA64_IPI_VECTOR: 389 - xen_send_IPI_one(cpu, XEN_IPI_VECTOR); 390 - break; 391 - case IA64_IPI_RESCHEDULE: 392 - xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); 393 - break; 394 - case IA64_CMCP_VECTOR: 395 - xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); 396 - break; 397 - case IA64_CPEP_VECTOR: 398 - xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); 399 - break; 400 - case IA64_TIMER_VECTOR: { 401 - /* this is used only once by check_sal_cache_flush() 402 - at boot time */ 403 - static int used = 0; 404 - if (!used) { 405 - xen_send_ipi(cpu, IA64_TIMER_VECTOR); 406 - used = 1; 407 - break; 408 - } 409 - /* fallthrough */ 410 - } 411 - default: 412 - printk(KERN_WARNING "Unsupported IPI type 0x%x\n", 413 - vector); 414 - notify_remote_via_irq(0); /* defaults to 0 irq */ 415 - break; 416 - } 417 - } 418 - 419 - static void __init 420 - xen_register_ipi(void) 421 - { 422 - #ifdef CONFIG_SMP 423 - register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction); 424 - register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction); 425 - register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction); 426 - #endif 427 - } 428 - 429 - static void 430 - xen_resend_irq(unsigned int vector) 431 - { 432 - (void)resend_irq_on_evtchn(vector); 433 - } 434 - 435 - const struct pv_irq_ops xen_irq_ops __initconst = { 436 - .register_ipi = xen_register_ipi, 437 - 438 - .assign_irq_vector = xen_assign_irq_vector, 439 - .free_irq_vector = xen_free_irq_vector, 440 - .register_percpu_irq = xen_register_percpu_irq, 441 - 442 - .resend_irq = xen_resend_irq, 443 - };
-34
arch/ia64/xen/irq_xen.h
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/irq_xen.h 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #ifndef IRQ_XEN_H 24 - #define IRQ_XEN_H 25 - 26 - extern void (*late_time_init)(void); 27 - extern char xen_event_callback; 28 - void __init xen_init_IRQ(void); 29 - 30 - extern const struct pv_irq_ops xen_irq_ops __initconst; 31 - extern void xen_smp_intr_init(void); 32 - extern void xen_send_ipi(int cpu, int vec); 33 - 34 - #endif /* IRQ_XEN_H */
-4
arch/ia64/xen/machvec.c
··· 1 - #define MACHVEC_PLATFORM_NAME xen 2 - #define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h> 3 - #include <asm/machvec_init.h> 4 -
-59
arch/ia64/xen/suspend.c
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/suspend.c 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - * suspend/resume 22 - */ 23 - 24 - #include <xen/xen-ops.h> 25 - #include <asm/xen/hypervisor.h> 26 - #include "time.h" 27 - 28 - void 29 - xen_mm_pin_all(void) 30 - { 31 - /* nothing */ 32 - } 33 - 34 - void 35 - xen_mm_unpin_all(void) 36 - { 37 - /* nothing */ 38 - } 39 - 40 - void 41 - xen_arch_pre_suspend() 42 - { 43 - /* nothing */ 44 - } 45 - 46 - void 47 - xen_arch_post_suspend(int suspend_cancelled) 48 - { 49 - if (suspend_cancelled) 50 - return; 51 - 52 - xen_ia64_enable_opt_feature(); 53 - /* add more if necessary */ 54 - } 55 - 56 - void xen_arch_resume(void) 57 - { 58 - xen_timer_resume_on_aps(); 59 - }
-257
arch/ia64/xen/time.c
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/time.c 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #include <linux/delay.h> 24 - #include <linux/kernel_stat.h> 25 - #include <linux/posix-timers.h> 26 - #include <linux/irq.h> 27 - #include <linux/clocksource.h> 28 - 29 - #include <asm/timex.h> 30 - 31 - #include <asm/xen/hypervisor.h> 32 - 33 - #include <xen/interface/vcpu.h> 34 - 35 - #include "../kernel/fsyscall_gtod_data.h" 36 - 37 - static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); 38 - static DEFINE_PER_CPU(unsigned long, xen_stolen_time); 39 - static DEFINE_PER_CPU(unsigned long, xen_blocked_time); 40 - 41 - /* taken from i386/kernel/time-xen.c */ 42 - static void xen_init_missing_ticks_accounting(int cpu) 43 - { 44 - struct vcpu_register_runstate_memory_area area; 45 - struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu); 46 - int rc; 47 - 48 - memset(runstate, 0, sizeof(*runstate)); 49 - 50 - area.addr.v = runstate; 51 - rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, 52 - &area); 53 - WARN_ON(rc && rc != -ENOSYS); 
54 - 55 - per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; 56 - per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] 57 - + runstate->time[RUNSTATE_offline]; 58 - } 59 - 60 - /* 61 - * Runstate accounting 62 - */ 63 - /* stolen from arch/x86/xen/time.c */ 64 - static void get_runstate_snapshot(struct vcpu_runstate_info *res) 65 - { 66 - u64 state_time; 67 - struct vcpu_runstate_info *state; 68 - 69 - BUG_ON(preemptible()); 70 - 71 - state = &__get_cpu_var(xen_runstate); 72 - 73 - /* 74 - * The runstate info is always updated by the hypervisor on 75 - * the current CPU, so there's no need to use anything 76 - * stronger than a compiler barrier when fetching it. 77 - */ 78 - do { 79 - state_time = state->state_entry_time; 80 - rmb(); 81 - *res = *state; 82 - rmb(); 83 - } while (state->state_entry_time != state_time); 84 - } 85 - 86 - #define NS_PER_TICK (1000000000LL/HZ) 87 - 88 - static unsigned long 89 - consider_steal_time(unsigned long new_itm) 90 - { 91 - unsigned long stolen, blocked; 92 - unsigned long delta_itm = 0, stolentick = 0; 93 - int cpu = smp_processor_id(); 94 - struct vcpu_runstate_info runstate; 95 - struct task_struct *p = current; 96 - 97 - get_runstate_snapshot(&runstate); 98 - 99 - /* 100 - * Check for vcpu migration effect 101 - * In this case, itc value is reversed. 102 - * This causes huge stolen value. 103 - * This function just checks and reject this effect. 
104 - */ 105 - if (!time_after_eq(runstate.time[RUNSTATE_blocked], 106 - per_cpu(xen_blocked_time, cpu))) 107 - blocked = 0; 108 - 109 - if (!time_after_eq(runstate.time[RUNSTATE_runnable] + 110 - runstate.time[RUNSTATE_offline], 111 - per_cpu(xen_stolen_time, cpu))) 112 - stolen = 0; 113 - 114 - if (!time_after(delta_itm + new_itm, ia64_get_itc())) 115 - stolentick = ia64_get_itc() - new_itm; 116 - 117 - do_div(stolentick, NS_PER_TICK); 118 - stolentick++; 119 - 120 - do_div(stolen, NS_PER_TICK); 121 - 122 - if (stolen > stolentick) 123 - stolen = stolentick; 124 - 125 - stolentick -= stolen; 126 - do_div(blocked, NS_PER_TICK); 127 - 128 - if (blocked > stolentick) 129 - blocked = stolentick; 130 - 131 - if (stolen > 0 || blocked > 0) { 132 - account_steal_ticks(stolen); 133 - account_idle_ticks(blocked); 134 - run_local_timers(); 135 - 136 - rcu_check_callbacks(cpu, user_mode(get_irq_regs())); 137 - 138 - scheduler_tick(); 139 - run_posix_cpu_timers(p); 140 - delta_itm += local_cpu_data->itm_delta * (stolen + blocked); 141 - 142 - if (cpu == time_keeper_id) 143 - xtime_update(stolen + blocked); 144 - 145 - local_cpu_data->itm_next = delta_itm + new_itm; 146 - 147 - per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; 148 - per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; 149 - } 150 - return delta_itm; 151 - } 152 - 153 - static int xen_do_steal_accounting(unsigned long *new_itm) 154 - { 155 - unsigned long delta_itm; 156 - delta_itm = consider_steal_time(*new_itm); 157 - *new_itm += delta_itm; 158 - if (time_after(*new_itm, ia64_get_itc()) && delta_itm) 159 - return 1; 160 - 161 - return 0; 162 - } 163 - 164 - static void xen_itc_jitter_data_reset(void) 165 - { 166 - u64 lcycle, ret; 167 - 168 - do { 169 - lcycle = itc_jitter_data.itc_lastcycle; 170 - ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0); 171 - } while (unlikely(ret != lcycle)); 172 - } 173 - 174 - /* based on xen_sched_clock() in arch/x86/xen/time.c. 
*/ 175 - /* 176 - * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, 177 - * something similar logic should be implemented here. 178 - */ 179 - /* 180 - * Xen sched_clock implementation. Returns the number of unstolen 181 - * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED 182 - * states. 183 - */ 184 - static unsigned long long xen_sched_clock(void) 185 - { 186 - struct vcpu_runstate_info runstate; 187 - 188 - unsigned long long now; 189 - unsigned long long offset; 190 - unsigned long long ret; 191 - 192 - /* 193 - * Ideally sched_clock should be called on a per-cpu basis 194 - * anyway, so preempt should already be disabled, but that's 195 - * not current practice at the moment. 196 - */ 197 - preempt_disable(); 198 - 199 - /* 200 - * both ia64_native_sched_clock() and xen's runstate are 201 - * based on mAR.ITC. So difference of them makes sense. 202 - */ 203 - now = ia64_native_sched_clock(); 204 - 205 - get_runstate_snapshot(&runstate); 206 - 207 - WARN_ON(runstate.state != RUNSTATE_running); 208 - 209 - offset = 0; 210 - if (now > runstate.state_entry_time) 211 - offset = now - runstate.state_entry_time; 212 - ret = runstate.time[RUNSTATE_blocked] + 213 - runstate.time[RUNSTATE_running] + 214 - offset; 215 - 216 - preempt_enable(); 217 - 218 - return ret; 219 - } 220 - 221 - struct pv_time_ops xen_time_ops __initdata = { 222 - .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, 223 - .do_steal_accounting = xen_do_steal_accounting, 224 - .clocksource_resume = xen_itc_jitter_data_reset, 225 - .sched_clock = xen_sched_clock, 226 - }; 227 - 228 - /* Called after suspend, to resume time. */ 229 - static void xen_local_tick_resume(void) 230 - { 231 - /* Just trigger a tick. 
*/ 232 - ia64_cpu_local_tick(); 233 - touch_softlockup_watchdog(); 234 - } 235 - 236 - void 237 - xen_timer_resume(void) 238 - { 239 - unsigned int cpu; 240 - 241 - xen_local_tick_resume(); 242 - 243 - for_each_online_cpu(cpu) 244 - xen_init_missing_ticks_accounting(cpu); 245 - } 246 - 247 - static void ia64_cpu_local_tick_fn(void *unused) 248 - { 249 - xen_local_tick_resume(); 250 - xen_init_missing_ticks_accounting(smp_processor_id()); 251 - } 252 - 253 - void 254 - xen_timer_resume_on_aps(void) 255 - { 256 - smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1); 257 - }
-24
arch/ia64/xen/time.h
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/time.h 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - extern struct pv_time_ops xen_time_ops __initdata; 24 - void xen_timer_resume_on_aps(void);
-441
arch/ia64/xen/xcom_hcall.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify 3 - * it under the terms of the GNU General Public License as published by 4 - * the Free Software Foundation; either version 2 of the License, or 5 - * (at your option) any later version. 6 - * 7 - * This program is distributed in the hope that it will be useful, 8 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 - * GNU General Public License for more details. 11 - * 12 - * You should have received a copy of the GNU General Public License 13 - * along with this program; if not, write to the Free Software 14 - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 15 - * 16 - * Tristan Gingold <tristan.gingold@bull.net> 17 - * 18 - * Copyright (c) 2007 19 - * Isaku Yamahata <yamahata at valinux co jp> 20 - * VA Linux Systems Japan K.K. 21 - * consolidate mini and inline version. 22 - */ 23 - 24 - #include <linux/module.h> 25 - #include <xen/interface/xen.h> 26 - #include <xen/interface/memory.h> 27 - #include <xen/interface/grant_table.h> 28 - #include <xen/interface/callback.h> 29 - #include <xen/interface/vcpu.h> 30 - #include <asm/xen/hypervisor.h> 31 - #include <asm/xen/xencomm.h> 32 - 33 - /* Xencomm notes: 34 - * This file defines hypercalls to be used by xencomm. The hypercalls simply 35 - * create inlines or mini descriptors for pointers and then call the raw arch 36 - * hypercall xencomm_arch_hypercall_XXX 37 - * 38 - * If the arch wants to directly use these hypercalls, simply define macros 39 - * in asm/xen/hypercall.h, eg: 40 - * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op 41 - * 42 - * The arch may also define HYPERVISOR_xxx as a function and do more operations 43 - * before/after doing the hypercall. 44 - * 45 - * Note: because only inline or mini descriptors are created these functions 46 - * must only be called with in kernel memory parameters. 
47 - */ 48 - 49 - int 50 - xencomm_hypercall_console_io(int cmd, int count, char *str) 51 - { 52 - /* xen early printk uses console io hypercall before 53 - * xencomm initialization. In that case, we just ignore it. 54 - */ 55 - if (!xencomm_is_initialized()) 56 - return 0; 57 - 58 - return xencomm_arch_hypercall_console_io 59 - (cmd, count, xencomm_map_no_alloc(str, count)); 60 - } 61 - EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); 62 - 63 - int 64 - xencomm_hypercall_event_channel_op(int cmd, void *op) 65 - { 66 - struct xencomm_handle *desc; 67 - desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); 68 - if (desc == NULL) 69 - return -EINVAL; 70 - 71 - return xencomm_arch_hypercall_event_channel_op(cmd, desc); 72 - } 73 - EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); 74 - 75 - int 76 - xencomm_hypercall_xen_version(int cmd, void *arg) 77 - { 78 - struct xencomm_handle *desc; 79 - unsigned int argsize; 80 - 81 - switch (cmd) { 82 - case XENVER_version: 83 - /* do not actually pass an argument */ 84 - return xencomm_arch_hypercall_xen_version(cmd, 0); 85 - case XENVER_extraversion: 86 - argsize = sizeof(struct xen_extraversion); 87 - break; 88 - case XENVER_compile_info: 89 - argsize = sizeof(struct xen_compile_info); 90 - break; 91 - case XENVER_capabilities: 92 - argsize = sizeof(struct xen_capabilities_info); 93 - break; 94 - case XENVER_changeset: 95 - argsize = sizeof(struct xen_changeset_info); 96 - break; 97 - case XENVER_platform_parameters: 98 - argsize = sizeof(struct xen_platform_parameters); 99 - break; 100 - case XENVER_get_features: 101 - argsize = (arg == NULL) ? 
0 : sizeof(struct xen_feature_info); 102 - break; 103 - 104 - default: 105 - printk(KERN_DEBUG 106 - "%s: unknown version op %d\n", __func__, cmd); 107 - return -ENOSYS; 108 - } 109 - 110 - desc = xencomm_map_no_alloc(arg, argsize); 111 - if (desc == NULL) 112 - return -EINVAL; 113 - 114 - return xencomm_arch_hypercall_xen_version(cmd, desc); 115 - } 116 - EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); 117 - 118 - int 119 - xencomm_hypercall_physdev_op(int cmd, void *op) 120 - { 121 - unsigned int argsize; 122 - 123 - switch (cmd) { 124 - case PHYSDEVOP_apic_read: 125 - case PHYSDEVOP_apic_write: 126 - argsize = sizeof(struct physdev_apic); 127 - break; 128 - case PHYSDEVOP_alloc_irq_vector: 129 - case PHYSDEVOP_free_irq_vector: 130 - argsize = sizeof(struct physdev_irq); 131 - break; 132 - case PHYSDEVOP_irq_status_query: 133 - argsize = sizeof(struct physdev_irq_status_query); 134 - break; 135 - 136 - default: 137 - printk(KERN_DEBUG 138 - "%s: unknown physdev op %d\n", __func__, cmd); 139 - return -ENOSYS; 140 - } 141 - 142 - return xencomm_arch_hypercall_physdev_op 143 - (cmd, xencomm_map_no_alloc(op, argsize)); 144 - } 145 - 146 - static int 147 - xencommize_grant_table_op(struct xencomm_mini **xc_area, 148 - unsigned int cmd, void *op, unsigned int count, 149 - struct xencomm_handle **desc) 150 - { 151 - struct xencomm_handle *desc1; 152 - unsigned int argsize; 153 - 154 - switch (cmd) { 155 - case GNTTABOP_map_grant_ref: 156 - argsize = sizeof(struct gnttab_map_grant_ref); 157 - break; 158 - case GNTTABOP_unmap_grant_ref: 159 - argsize = sizeof(struct gnttab_unmap_grant_ref); 160 - break; 161 - case GNTTABOP_setup_table: 162 - { 163 - struct gnttab_setup_table *setup = op; 164 - 165 - argsize = sizeof(*setup); 166 - 167 - if (count != 1) 168 - return -EINVAL; 169 - desc1 = __xencomm_map_no_alloc 170 - (xen_guest_handle(setup->frame_list), 171 - setup->nr_frames * 172 - sizeof(*xen_guest_handle(setup->frame_list)), 173 - *xc_area); 174 - if (desc1 == NULL) 
175 - return -EINVAL; 176 - (*xc_area)++; 177 - set_xen_guest_handle(setup->frame_list, (void *)desc1); 178 - break; 179 - } 180 - case GNTTABOP_dump_table: 181 - argsize = sizeof(struct gnttab_dump_table); 182 - break; 183 - case GNTTABOP_transfer: 184 - argsize = sizeof(struct gnttab_transfer); 185 - break; 186 - case GNTTABOP_copy: 187 - argsize = sizeof(struct gnttab_copy); 188 - break; 189 - case GNTTABOP_query_size: 190 - argsize = sizeof(struct gnttab_query_size); 191 - break; 192 - default: 193 - printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", 194 - __func__, cmd); 195 - BUG(); 196 - } 197 - 198 - *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); 199 - if (*desc == NULL) 200 - return -EINVAL; 201 - (*xc_area)++; 202 - 203 - return 0; 204 - } 205 - 206 - int 207 - xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, 208 - unsigned int count) 209 - { 210 - int rc; 211 - struct xencomm_handle *desc; 212 - XENCOMM_MINI_ALIGNED(xc_area, 2); 213 - 214 - rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); 215 - if (rc) 216 - return rc; 217 - 218 - return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); 219 - } 220 - EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); 221 - 222 - int 223 - xencomm_hypercall_sched_op(int cmd, void *arg) 224 - { 225 - struct xencomm_handle *desc; 226 - unsigned int argsize; 227 - 228 - switch (cmd) { 229 - case SCHEDOP_yield: 230 - case SCHEDOP_block: 231 - argsize = 0; 232 - break; 233 - case SCHEDOP_shutdown: 234 - argsize = sizeof(struct sched_shutdown); 235 - break; 236 - case SCHEDOP_poll: 237 - { 238 - struct sched_poll *poll = arg; 239 - struct xencomm_handle *ports; 240 - 241 - argsize = sizeof(struct sched_poll); 242 - ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), 243 - sizeof(*xen_guest_handle(poll->ports))); 244 - 245 - set_xen_guest_handle(poll->ports, (void *)ports); 246 - break; 247 - } 248 - default: 249 - printk(KERN_DEBUG "%s: unknown sched op 
%d\n", __func__, cmd); 250 - return -ENOSYS; 251 - } 252 - 253 - desc = xencomm_map_no_alloc(arg, argsize); 254 - if (desc == NULL) 255 - return -EINVAL; 256 - 257 - return xencomm_arch_hypercall_sched_op(cmd, desc); 258 - } 259 - EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); 260 - 261 - int 262 - xencomm_hypercall_multicall(void *call_list, int nr_calls) 263 - { 264 - int rc; 265 - int i; 266 - struct multicall_entry *mce; 267 - struct xencomm_handle *desc; 268 - XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); 269 - 270 - for (i = 0; i < nr_calls; i++) { 271 - mce = (struct multicall_entry *)call_list + i; 272 - 273 - switch (mce->op) { 274 - case __HYPERVISOR_update_va_mapping: 275 - case __HYPERVISOR_mmu_update: 276 - /* No-op on ia64. */ 277 - break; 278 - case __HYPERVISOR_grant_table_op: 279 - rc = xencommize_grant_table_op 280 - (&xc_area, 281 - mce->args[0], (void *)mce->args[1], 282 - mce->args[2], &desc); 283 - if (rc) 284 - return rc; 285 - mce->args[1] = (unsigned long)desc; 286 - break; 287 - case __HYPERVISOR_memory_op: 288 - default: 289 - printk(KERN_DEBUG 290 - "%s: unhandled multicall op entry op %lu\n", 291 - __func__, mce->op); 292 - return -ENOSYS; 293 - } 294 - } 295 - 296 - desc = xencomm_map_no_alloc(call_list, 297 - nr_calls * sizeof(struct multicall_entry)); 298 - if (desc == NULL) 299 - return -EINVAL; 300 - 301 - return xencomm_arch_hypercall_multicall(desc, nr_calls); 302 - } 303 - EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); 304 - 305 - int 306 - xencomm_hypercall_callback_op(int cmd, void *arg) 307 - { 308 - unsigned int argsize; 309 - switch (cmd) { 310 - case CALLBACKOP_register: 311 - argsize = sizeof(struct callback_register); 312 - break; 313 - case CALLBACKOP_unregister: 314 - argsize = sizeof(struct callback_unregister); 315 - break; 316 - default: 317 - printk(KERN_DEBUG 318 - "%s: unknown callback op %d\n", __func__, cmd); 319 - return -ENOSYS; 320 - } 321 - 322 - return xencomm_arch_hypercall_callback_op 323 - (cmd, 
xencomm_map_no_alloc(arg, argsize)); 324 - } 325 - 326 - static int 327 - xencommize_memory_reservation(struct xencomm_mini *xc_area, 328 - struct xen_memory_reservation *mop) 329 - { 330 - struct xencomm_handle *desc; 331 - 332 - desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), 333 - mop->nr_extents * 334 - sizeof(*xen_guest_handle(mop->extent_start)), 335 - xc_area); 336 - if (desc == NULL) 337 - return -EINVAL; 338 - 339 - set_xen_guest_handle(mop->extent_start, (void *)desc); 340 - return 0; 341 - } 342 - 343 - int 344 - xencomm_hypercall_memory_op(unsigned int cmd, void *arg) 345 - { 346 - GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} }; 347 - struct xen_memory_reservation *xmr = NULL; 348 - int rc; 349 - struct xencomm_handle *desc; 350 - unsigned int argsize; 351 - XENCOMM_MINI_ALIGNED(xc_area, 2); 352 - 353 - switch (cmd) { 354 - case XENMEM_increase_reservation: 355 - case XENMEM_decrease_reservation: 356 - case XENMEM_populate_physmap: 357 - xmr = (struct xen_memory_reservation *)arg; 358 - set_xen_guest_handle(extent_start_va[0], 359 - xen_guest_handle(xmr->extent_start)); 360 - 361 - argsize = sizeof(*xmr); 362 - rc = xencommize_memory_reservation(xc_area, xmr); 363 - if (rc) 364 - return rc; 365 - xc_area++; 366 - break; 367 - 368 - case XENMEM_maximum_ram_page: 369 - argsize = 0; 370 - break; 371 - 372 - case XENMEM_add_to_physmap: 373 - argsize = sizeof(struct xen_add_to_physmap); 374 - break; 375 - 376 - default: 377 - printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); 378 - return -ENOSYS; 379 - } 380 - 381 - desc = xencomm_map_no_alloc(arg, argsize); 382 - if (desc == NULL) 383 - return -EINVAL; 384 - 385 - rc = xencomm_arch_hypercall_memory_op(cmd, desc); 386 - 387 - switch (cmd) { 388 - case XENMEM_increase_reservation: 389 - case XENMEM_decrease_reservation: 390 - case XENMEM_populate_physmap: 391 - set_xen_guest_handle(xmr->extent_start, 392 - xen_guest_handle(extent_start_va[0])); 393 - break; 
394 - } 395 - 396 - return rc; 397 - } 398 - EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); 399 - 400 - int 401 - xencomm_hypercall_suspend(unsigned long srec) 402 - { 403 - struct sched_shutdown arg; 404 - 405 - arg.reason = SHUTDOWN_suspend; 406 - 407 - return xencomm_arch_hypercall_sched_op( 408 - SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg))); 409 - } 410 - 411 - long 412 - xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) 413 - { 414 - unsigned int argsize; 415 - switch (cmd) { 416 - case VCPUOP_register_runstate_memory_area: { 417 - struct vcpu_register_runstate_memory_area *area = 418 - (struct vcpu_register_runstate_memory_area *)arg; 419 - argsize = sizeof(*arg); 420 - set_xen_guest_handle(area->addr.h, 421 - (void *)xencomm_map_no_alloc(area->addr.v, 422 - sizeof(area->addr.v))); 423 - break; 424 - } 425 - 426 - default: 427 - printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); 428 - return -ENOSYS; 429 - } 430 - 431 - return xencomm_arch_hypercall_vcpu_op(cmd, cpu, 432 - xencomm_map_no_alloc(arg, argsize)); 433 - } 434 - 435 - long 436 - xencomm_hypercall_opt_feature(void *arg) 437 - { 438 - return xencomm_arch_hypercall_opt_feature( 439 - xencomm_map_no_alloc(arg, 440 - sizeof(struct xen_ia64_opt_feature))); 441 - }
-1141
arch/ia64/xen/xen_pv_ops.c
··· 1 - /****************************************************************************** 2 - * arch/ia64/xen/xen_pv_ops.c 3 - * 4 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 - * VA Linux Systems Japan K.K. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 - * 21 - */ 22 - 23 - #include <linux/console.h> 24 - #include <linux/irq.h> 25 - #include <linux/kernel.h> 26 - #include <linux/pm.h> 27 - #include <linux/unistd.h> 28 - 29 - #include <asm/xen/hypervisor.h> 30 - #include <asm/xen/xencomm.h> 31 - #include <asm/xen/privop.h> 32 - 33 - #include "irq_xen.h" 34 - #include "time.h" 35 - 36 - /*************************************************************************** 37 - * general info 38 - */ 39 - static struct pv_info xen_info __initdata = { 40 - .kernel_rpl = 2, /* or 1: determin at runtime */ 41 - .paravirt_enabled = 1, 42 - .name = "Xen/ia64", 43 - }; 44 - 45 - #define IA64_RSC_PL_SHIFT 2 46 - #define IA64_RSC_PL_BIT_SIZE 2 47 - #define IA64_RSC_PL_MASK \ 48 - (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT) 49 - 50 - static void __init 51 - xen_info_init(void) 52 - { 53 - /* Xenified Linux/ia64 may run on pl = 1 or 2. 54 - * determin at run time. 
*/ 55 - unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); 56 - unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; 57 - xen_info.kernel_rpl = rpl; 58 - } 59 - 60 - /*************************************************************************** 61 - * pv_init_ops 62 - * initialization hooks. 63 - */ 64 - 65 - static void 66 - xen_panic_hypercall(struct unw_frame_info *info, void *arg) 67 - { 68 - current->thread.ksp = (__u64)info->sw - 16; 69 - HYPERVISOR_shutdown(SHUTDOWN_crash); 70 - /* we're never actually going to get here... */ 71 - } 72 - 73 - static int 74 - xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) 75 - { 76 - unw_init_running(xen_panic_hypercall, NULL); 77 - /* we're never actually going to get here... */ 78 - return NOTIFY_DONE; 79 - } 80 - 81 - static struct notifier_block xen_panic_block = { 82 - xen_panic_event, NULL, 0 /* try to go last */ 83 - }; 84 - 85 - static void xen_pm_power_off(void) 86 - { 87 - local_irq_disable(); 88 - HYPERVISOR_shutdown(SHUTDOWN_poweroff); 89 - } 90 - 91 - static void __init 92 - xen_banner(void) 93 - { 94 - printk(KERN_INFO 95 - "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld " 96 - "flags=0x%x\n", 97 - xen_info.kernel_rpl, 98 - HYPERVISOR_shared_info->arch.start_info_pfn, 99 - xen_start_info->nr_pages, xen_start_info->flags); 100 - } 101 - 102 - static int __init 103 - xen_reserve_memory(struct rsvd_region *region) 104 - { 105 - region->start = (unsigned long)__va( 106 - (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); 107 - region->end = region->start + PAGE_SIZE; 108 - return 1; 109 - } 110 - 111 - static void __init 112 - xen_arch_setup_early(void) 113 - { 114 - struct shared_info *s; 115 - BUG_ON(!xen_pv_domain()); 116 - 117 - s = HYPERVISOR_shared_info; 118 - xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); 119 - 120 - /* Must be done before any hypercall. 
*/ 121 - xencomm_initialize(); 122 - 123 - xen_setup_features(); 124 - /* Register a call for panic conditions. */ 125 - atomic_notifier_chain_register(&panic_notifier_list, 126 - &xen_panic_block); 127 - pm_power_off = xen_pm_power_off; 128 - 129 - xen_ia64_enable_opt_feature(); 130 - } 131 - 132 - static void __init 133 - xen_arch_setup_console(char **cmdline_p) 134 - { 135 - add_preferred_console("xenboot", 0, NULL); 136 - add_preferred_console("tty", 0, NULL); 137 - /* use hvc_xen */ 138 - add_preferred_console("hvc", 0, NULL); 139 - 140 - #if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) 141 - conswitchp = NULL; 142 - #endif 143 - } 144 - 145 - static int __init 146 - xen_arch_setup_nomca(void) 147 - { 148 - return 1; 149 - } 150 - 151 - static void __init 152 - xen_post_smp_prepare_boot_cpu(void) 153 - { 154 - xen_setup_vcpu_info_placement(); 155 - } 156 - 157 - #ifdef ASM_SUPPORTED 158 - static unsigned long __init_or_module 159 - xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); 160 - #endif 161 - static void __init 162 - xen_patch_branch(unsigned long tag, unsigned long type); 163 - 164 - static const struct pv_init_ops xen_init_ops __initconst = { 165 - .banner = xen_banner, 166 - 167 - .reserve_memory = xen_reserve_memory, 168 - 169 - .arch_setup_early = xen_arch_setup_early, 170 - .arch_setup_console = xen_arch_setup_console, 171 - .arch_setup_nomca = xen_arch_setup_nomca, 172 - 173 - .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, 174 - #ifdef ASM_SUPPORTED 175 - .patch_bundle = xen_patch_bundle, 176 - #endif 177 - .patch_branch = xen_patch_branch, 178 - }; 179 - 180 - /*************************************************************************** 181 - * pv_fsys_data 182 - * addresses for fsys 183 - */ 184 - 185 - extern unsigned long xen_fsyscall_table[NR_syscalls]; 186 - extern char xen_fsys_bubble_down[]; 187 - struct pv_fsys_data xen_fsys_data __initdata = { 188 - .fsyscall_table = (unsigned long 
*)xen_fsyscall_table, 189 - .fsys_bubble_down = (void *)xen_fsys_bubble_down, 190 - }; 191 - 192 - /*************************************************************************** 193 - * pv_patchdata 194 - * patchdata addresses 195 - */ 196 - 197 - #define DECLARE(name) \ 198 - extern unsigned long __xen_start_gate_##name##_patchlist[]; \ 199 - extern unsigned long __xen_end_gate_##name##_patchlist[] 200 - 201 - DECLARE(fsyscall); 202 - DECLARE(brl_fsys_bubble_down); 203 - DECLARE(vtop); 204 - DECLARE(mckinley_e9); 205 - 206 - extern unsigned long __xen_start_gate_section[]; 207 - 208 - #define ASSIGN(name) \ 209 - .start_##name##_patchlist = \ 210 - (unsigned long)__xen_start_gate_##name##_patchlist, \ 211 - .end_##name##_patchlist = \ 212 - (unsigned long)__xen_end_gate_##name##_patchlist 213 - 214 - static struct pv_patchdata xen_patchdata __initdata = { 215 - ASSIGN(fsyscall), 216 - ASSIGN(brl_fsys_bubble_down), 217 - ASSIGN(vtop), 218 - ASSIGN(mckinley_e9), 219 - 220 - .gate_section = (void*)__xen_start_gate_section, 221 - }; 222 - 223 - /*************************************************************************** 224 - * pv_cpu_ops 225 - * intrinsics hooks. 226 - */ 227 - 228 - #ifndef ASM_SUPPORTED 229 - static void 230 - xen_set_itm_with_offset(unsigned long val) 231 - { 232 - /* ia64_cpu_local_tick() calls this with interrupt enabled. */ 233 - /* WARN_ON(!irqs_disabled()); */ 234 - xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); 235 - } 236 - 237 - static unsigned long 238 - xen_get_itm_with_offset(void) 239 - { 240 - /* unused at this moment */ 241 - printk(KERN_DEBUG "%s is called.\n", __func__); 242 - 243 - WARN_ON(!irqs_disabled()); 244 - return ia64_native_getreg(_IA64_REG_CR_ITM) + 245 - XEN_MAPPEDREGS->itc_offset; 246 - } 247 - 248 - /* ia64_set_itc() is only called by 249 - * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). 250 - * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant. 
251 - */ 252 - static void 253 - xen_set_itc(unsigned long val) 254 - { 255 - unsigned long mitc; 256 - 257 - WARN_ON(!irqs_disabled()); 258 - mitc = ia64_native_getreg(_IA64_REG_AR_ITC); 259 - XEN_MAPPEDREGS->itc_offset = val - mitc; 260 - XEN_MAPPEDREGS->itc_last = val; 261 - } 262 - 263 - static unsigned long 264 - xen_get_itc(void) 265 - { 266 - unsigned long res; 267 - unsigned long itc_offset; 268 - unsigned long itc_last; 269 - unsigned long ret_itc_last; 270 - 271 - itc_offset = XEN_MAPPEDREGS->itc_offset; 272 - do { 273 - itc_last = XEN_MAPPEDREGS->itc_last; 274 - res = ia64_native_getreg(_IA64_REG_AR_ITC); 275 - res += itc_offset; 276 - if (itc_last >= res) 277 - res = itc_last + 1; 278 - ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, 279 - itc_last, res); 280 - } while (unlikely(ret_itc_last != itc_last)); 281 - return res; 282 - 283 - #if 0 284 - /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. 285 - Should it be paravirtualized instead? */ 286 - WARN_ON(!irqs_disabled()); 287 - itc_offset = XEN_MAPPEDREGS->itc_offset; 288 - itc_last = XEN_MAPPEDREGS->itc_last; 289 - res = ia64_native_getreg(_IA64_REG_AR_ITC); 290 - res += itc_offset; 291 - if (itc_last >= res) 292 - res = itc_last + 1; 293 - XEN_MAPPEDREGS->itc_last = res; 294 - return res; 295 - #endif 296 - } 297 - 298 - static void xen_setreg(int regnum, unsigned long val) 299 - { 300 - switch (regnum) { 301 - case _IA64_REG_AR_KR0 ... 
_IA64_REG_AR_KR7: 302 - xen_set_kr(regnum - _IA64_REG_AR_KR0, val); 303 - break; 304 - case _IA64_REG_AR_ITC: 305 - xen_set_itc(val); 306 - break; 307 - case _IA64_REG_CR_TPR: 308 - xen_set_tpr(val); 309 - break; 310 - case _IA64_REG_CR_ITM: 311 - xen_set_itm_with_offset(val); 312 - break; 313 - case _IA64_REG_CR_EOI: 314 - xen_eoi(val); 315 - break; 316 - default: 317 - ia64_native_setreg_func(regnum, val); 318 - break; 319 - } 320 - } 321 - 322 - static unsigned long xen_getreg(int regnum) 323 - { 324 - unsigned long res; 325 - 326 - switch (regnum) { 327 - case _IA64_REG_PSR: 328 - res = xen_get_psr(); 329 - break; 330 - case _IA64_REG_AR_ITC: 331 - res = xen_get_itc(); 332 - break; 333 - case _IA64_REG_CR_ITM: 334 - res = xen_get_itm_with_offset(); 335 - break; 336 - case _IA64_REG_CR_IVR: 337 - res = xen_get_ivr(); 338 - break; 339 - case _IA64_REG_CR_TPR: 340 - res = xen_get_tpr(); 341 - break; 342 - default: 343 - res = ia64_native_getreg_func(regnum); 344 - break; 345 - } 346 - return res; 347 - } 348 - 349 - /* turning on interrupts is a bit more complicated.. write to the 350 - * memory-mapped virtual psr.i bit first (to avoid race condition), 351 - * then if any interrupts were pending, we have to execute a hyperprivop 352 - * to ensure the pending interrupt gets delivered; else we're done! */ 353 - static void 354 - xen_ssm_i(void) 355 - { 356 - int old = xen_get_virtual_psr_i(); 357 - xen_set_virtual_psr_i(1); 358 - barrier(); 359 - if (!old && xen_get_virtual_pend()) 360 - xen_hyper_ssm_i(); 361 - } 362 - 363 - /* turning off interrupts can be paravirtualized simply by writing 364 - * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */ 365 - static void 366 - xen_rsm_i(void) 367 - { 368 - xen_set_virtual_psr_i(0); 369 - barrier(); 370 - } 371 - 372 - static unsigned long 373 - xen_get_psr_i(void) 374 - { 375 - return xen_get_virtual_psr_i() ? 
IA64_PSR_I : 0; 376 - } 377 - 378 - static void 379 - xen_intrin_local_irq_restore(unsigned long mask) 380 - { 381 - if (mask & IA64_PSR_I) 382 - xen_ssm_i(); 383 - else 384 - xen_rsm_i(); 385 - } 386 - #else 387 - #define __DEFINE_FUNC(name, code) \ 388 - extern const char xen_ ## name ## _direct_start[]; \ 389 - extern const char xen_ ## name ## _direct_end[]; \ 390 - asm (".align 32\n" \ 391 - ".proc xen_" #name "\n" \ 392 - "xen_" #name ":\n" \ 393 - "xen_" #name "_direct_start:\n" \ 394 - code \ 395 - "xen_" #name "_direct_end:\n" \ 396 - "br.cond.sptk.many b6\n" \ 397 - ".endp xen_" #name "\n") 398 - 399 - #define DEFINE_VOID_FUNC0(name, code) \ 400 - extern void \ 401 - xen_ ## name (void); \ 402 - __DEFINE_FUNC(name, code) 403 - 404 - #define DEFINE_VOID_FUNC1(name, code) \ 405 - extern void \ 406 - xen_ ## name (unsigned long arg); \ 407 - __DEFINE_FUNC(name, code) 408 - 409 - #define DEFINE_VOID_FUNC1_VOID(name, code) \ 410 - extern void \ 411 - xen_ ## name (void *arg); \ 412 - __DEFINE_FUNC(name, code) 413 - 414 - #define DEFINE_VOID_FUNC2(name, code) \ 415 - extern void \ 416 - xen_ ## name (unsigned long arg0, \ 417 - unsigned long arg1); \ 418 - __DEFINE_FUNC(name, code) 419 - 420 - #define DEFINE_FUNC0(name, code) \ 421 - extern unsigned long \ 422 - xen_ ## name (void); \ 423 - __DEFINE_FUNC(name, code) 424 - 425 - #define DEFINE_FUNC1(name, type, code) \ 426 - extern unsigned long \ 427 - xen_ ## name (type arg); \ 428 - __DEFINE_FUNC(name, code) 429 - 430 - #define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) 431 - 432 - /* 433 - * static void xen_set_itm_with_offset(unsigned long val) 434 - * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); 435 - */ 436 - /* 2 bundles */ 437 - DEFINE_VOID_FUNC1(set_itm_with_offset, 438 - "mov r2 = " __stringify(XSI_BASE) " + " 439 - __stringify(XSI_ITC_OFFSET_OFS) "\n" 440 - ";;\n" 441 - "ld8 r3 = [r2]\n" 442 - ";;\n" 443 - "sub r8 = r8, r3\n" 444 - "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); 445 - 446 
- /* 447 - * static unsigned long xen_get_itm_with_offset(void) 448 - * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; 449 - */ 450 - /* 2 bundles */ 451 - DEFINE_FUNC0(get_itm_with_offset, 452 - "mov r2 = " __stringify(XSI_BASE) " + " 453 - __stringify(XSI_ITC_OFFSET_OFS) "\n" 454 - ";;\n" 455 - "ld8 r3 = [r2]\n" 456 - "mov r8 = cr.itm\n" 457 - ";;\n" 458 - "add r8 = r8, r2\n"); 459 - 460 - /* 461 - * static void xen_set_itc(unsigned long val) 462 - * unsigned long mitc; 463 - * 464 - * WARN_ON(!irqs_disabled()); 465 - * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); 466 - * XEN_MAPPEDREGS->itc_offset = val - mitc; 467 - * XEN_MAPPEDREGS->itc_last = val; 468 - */ 469 - /* 2 bundles */ 470 - DEFINE_VOID_FUNC1(set_itc, 471 - "mov r2 = " __stringify(XSI_BASE) " + " 472 - __stringify(XSI_ITC_LAST_OFS) "\n" 473 - "mov r3 = ar.itc\n" 474 - ";;\n" 475 - "sub r3 = r8, r3\n" 476 - "st8 [r2] = r8, " 477 - __stringify(XSI_ITC_LAST_OFS) " - " 478 - __stringify(XSI_ITC_OFFSET_OFS) "\n" 479 - ";;\n" 480 - "st8 [r2] = r3\n"); 481 - 482 - /* 483 - * static unsigned long xen_get_itc(void) 484 - * unsigned long res; 485 - * unsigned long itc_offset; 486 - * unsigned long itc_last; 487 - * unsigned long ret_itc_last; 488 - * 489 - * itc_offset = XEN_MAPPEDREGS->itc_offset; 490 - * do { 491 - * itc_last = XEN_MAPPEDREGS->itc_last; 492 - * res = ia64_native_getreg(_IA64_REG_AR_ITC); 493 - * res += itc_offset; 494 - * if (itc_last >= res) 495 - * res = itc_last + 1; 496 - * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, 497 - * itc_last, res); 498 - * } while (unlikely(ret_itc_last != itc_last)); 499 - * return res; 500 - */ 501 - /* 5 bundles */ 502 - DEFINE_FUNC0(get_itc, 503 - "mov r2 = " __stringify(XSI_BASE) " + " 504 - __stringify(XSI_ITC_OFFSET_OFS) "\n" 505 - ";;\n" 506 - "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " 507 - __stringify(XSI_ITC_OFFSET_OFS) "\n" 508 - /* r9 = itc_offset */ 509 - /* r2 = XSI_ITC_OFFSET */ 510 - "888:\n" 511 - 
"mov r8 = ar.itc\n" /* res = ar.itc */ 512 - ";;\n" 513 - "ld8 r3 = [r2]\n" /* r3 = itc_last */ 514 - "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ 515 - ";;\n" 516 - "cmp.gtu p6, p0 = r3, r8\n" 517 - ";;\n" 518 - "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ 519 - ";;\n" 520 - "mov ar.ccv = r8\n" 521 - ";;\n" 522 - "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" 523 - ";;\n" 524 - "cmp.ne p6, p0 = r10, r3\n" 525 - "(p6) hint @pause\n" 526 - "(p6) br.cond.spnt 888b\n"); 527 - 528 - DEFINE_VOID_FUNC1_VOID(fc, 529 - "break " __stringify(HYPERPRIVOP_FC) "\n"); 530 - 531 - /* 532 - * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR 533 - * masked_addr = *psr_i_addr_addr 534 - * pending_intr_addr = masked_addr - 1 535 - * if (val & IA64_PSR_I) { 536 - * masked = *masked_addr 537 - * *masked_addr = 0:xen_set_virtual_psr_i(1) 538 - * compiler barrier 539 - * if (masked) { 540 - * uint8_t pending = *pending_intr_addr; 541 - * if (pending) 542 - * XEN_HYPER_SSM_I 543 - * } 544 - * } else { 545 - * *masked_addr = 1:xen_set_virtual_psr_i(0) 546 - * } 547 - */ 548 - /* 6 bundles */ 549 - DEFINE_VOID_FUNC1(intrin_local_irq_restore, 550 - /* r8 = input value: 0 or IA64_PSR_I 551 - * p6 = (flags & IA64_PSR_I) 552 - * = if clause 553 - * p7 = !(flags & IA64_PSR_I) 554 - * = else clause 555 - */ 556 - "cmp.ne p6, p7 = r8, r0\n" 557 - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 558 - ";;\n" 559 - /* r9 = XEN_PSR_I_ADDR */ 560 - "ld8 r9 = [r9]\n" 561 - ";;\n" 562 - 563 - /* r10 = masked previous value */ 564 - "(p6) ld1.acq r10 = [r9]\n" 565 - ";;\n" 566 - 567 - /* p8 = !masked interrupt masked previously? 
*/ 568 - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" 569 - 570 - /* p7 = else clause */ 571 - "(p7) mov r11 = 1\n" 572 - ";;\n" 573 - /* masked = 1 */ 574 - "(p7) st1.rel [r9] = r11\n" 575 - 576 - /* p6 = if clause */ 577 - /* masked = 0 578 - * r9 = masked_addr - 1 579 - * = pending_intr_addr 580 - */ 581 - "(p8) st1.rel [r9] = r0, -1\n" 582 - ";;\n" 583 - /* r8 = pending_intr */ 584 - "(p8) ld1.acq r11 = [r9]\n" 585 - ";;\n" 586 - /* p9 = interrupt pending? */ 587 - "(p8) cmp.ne.unc p9, p10 = r11, r0\n" 588 - ";;\n" 589 - "(p10) mf\n" 590 - /* issue hypercall to trigger interrupt */ 591 - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); 592 - 593 - DEFINE_VOID_FUNC2(ptcga, 594 - "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); 595 - DEFINE_VOID_FUNC2(set_rr, 596 - "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); 597 - 598 - /* 599 - * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; 600 - * tmp = *tmp 601 - * tmp = *tmp; 602 - * psr_i = tmp? 0: IA64_PSR_I; 603 - */ 604 - /* 4 bundles */ 605 - DEFINE_FUNC0(get_psr_i, 606 - "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 607 - ";;\n" 608 - "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ 609 - "mov r8 = 0\n" /* psr_i = 0 */ 610 - ";;\n" 611 - "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ 612 - ";;\n" 613 - "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ 614 - ";;\n" 615 - "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); 616 - 617 - DEFINE_FUNC1(thash, unsigned long, 618 - "break " __stringify(HYPERPRIVOP_THASH) "\n"); 619 - DEFINE_FUNC1(get_cpuid, int, 620 - "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); 621 - DEFINE_FUNC1(get_pmd, int, 622 - "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); 623 - DEFINE_FUNC1(get_rr, unsigned long, 624 - "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); 625 - 626 - /* 627 - * void xen_privop_ssm_i(void) 628 - * 629 - * int masked = !xen_get_virtual_psr_i(); 630 - * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) 631 - * xen_set_virtual_psr_i(1) 632 - * 
// *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 633 - * // compiler barrier 634 - * if (masked) { 635 - * uint8_t* pend_int_addr = 636 - * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; 637 - * uint8_t pending = *pend_int_addr; 638 - * if (pending) 639 - * XEN_HYPER_SSM_I 640 - * } 641 - */ 642 - /* 4 bundles */ 643 - DEFINE_VOID_FUNC0(ssm_i, 644 - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 645 - ";;\n" 646 - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ 647 - ";;\n" 648 - "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ 649 - ";;\n" 650 - "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt 651 - * r8 = XEN_PSR_I_ADDR - 1 652 - * = pend_int_addr 653 - */ 654 - "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I 655 - * previously interrupt 656 - * masked? 657 - */ 658 - ";;\n" 659 - "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ 660 - ";;\n" 661 - "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ 662 - ";;\n" 663 - /* issue hypercall to get interrupt */ 664 - "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" 665 - ";;\n"); 666 - 667 - /* 668 - * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr 669 - * = XEN_PSR_I_ADDR_ADDR; 670 - * psr_i_addr = *psr_i_addr_addr; 671 - * *psr_i_addr = 1; 672 - */ 673 - /* 2 bundles */ 674 - DEFINE_VOID_FUNC0(rsm_i, 675 - "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 676 - /* r8 = XEN_PSR_I_ADDR */ 677 - "mov r9 = 1\n" 678 - ";;\n" 679 - "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ 680 - ";;\n" 681 - "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ 682 - 683 - extern void 684 - xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, 685 - unsigned long val2, unsigned long val3, 686 - unsigned long val4); 687 - __DEFINE_FUNC(set_rr0_to_rr4, 688 - "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); 689 - 690 - 691 - extern unsigned long xen_getreg(int regnum); 692 - #define __DEFINE_GET_REG(id, privop) \ 693 - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 694 - ";;\n" \ 695 - "cmp.eq p6, p0 = r2, r8\n" \ 696 
- ";;\n" \ 697 - "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ 698 - "(p6) br.cond.sptk.many b6\n" \ 699 - ";;\n" 700 - 701 - __DEFINE_FUNC(getreg, 702 - __DEFINE_GET_REG(PSR, PSR) 703 - 704 - /* get_itc */ 705 - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" 706 - ";;\n" 707 - "cmp.eq p6, p0 = r2, r8\n" 708 - ";;\n" 709 - "(p6) br.cond.spnt xen_get_itc\n" 710 - ";;\n" 711 - 712 - /* get itm */ 713 - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" 714 - ";;\n" 715 - "cmp.eq p6, p0 = r2, r8\n" 716 - ";;\n" 717 - "(p6) br.cond.spnt xen_get_itm_with_offset\n" 718 - ";;\n" 719 - 720 - __DEFINE_GET_REG(CR_IVR, IVR) 721 - __DEFINE_GET_REG(CR_TPR, TPR) 722 - 723 - /* fall back */ 724 - "movl r2 = ia64_native_getreg_func\n" 725 - ";;\n" 726 - "mov b7 = r2\n" 727 - ";;\n" 728 - "br.cond.sptk.many b7\n"); 729 - 730 - extern void xen_setreg(int regnum, unsigned long val); 731 - #define __DEFINE_SET_REG(id, privop) \ 732 - "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 733 - ";;\n" \ 734 - "cmp.eq p6, p0 = r2, r9\n" \ 735 - ";;\n" \ 736 - "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \ 737 - "(p6) br.cond.sptk.many b6\n" \ 738 - ";;\n" 739 - 740 - __DEFINE_FUNC(setreg, 741 - /* kr0 .. 
kr 7*/ 742 - /* 743 - * if (_IA64_REG_AR_KR0 <= regnum && 744 - * regnum <= _IA64_REG_AR_KR7) { 745 - * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 746 - * register __val asm ("r9") = val 747 - * "break HYPERPRIVOP_SET_KR" 748 - * } 749 - */ 750 - "mov r17 = r9\n" 751 - "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" 752 - ";;\n" 753 - "cmp.ge p6, p0 = r9, r2\n" 754 - "sub r17 = r17, r2\n" 755 - ";;\n" 756 - "(p6) cmp.ge.unc p7, p0 = " 757 - __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) 758 - ", r17\n" 759 - ";;\n" 760 - "(p7) mov r9 = r8\n" 761 - ";;\n" 762 - "(p7) mov r8 = r17\n" 763 - "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" 764 - 765 - /* set itm */ 766 - "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" 767 - ";;\n" 768 - "cmp.eq p6, p0 = r2, r8\n" 769 - ";;\n" 770 - "(p6) br.cond.spnt xen_set_itm_with_offset\n" 771 - 772 - /* set itc */ 773 - "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" 774 - ";;\n" 775 - "cmp.eq p6, p0 = r2, r8\n" 776 - ";;\n" 777 - "(p6) br.cond.spnt xen_set_itc\n" 778 - 779 - __DEFINE_SET_REG(CR_TPR, SET_TPR) 780 - __DEFINE_SET_REG(CR_EOI, EOI) 781 - 782 - /* fall back */ 783 - "movl r2 = ia64_native_setreg_func\n" 784 - ";;\n" 785 - "mov b7 = r2\n" 786 - ";;\n" 787 - "br.cond.sptk.many b7\n"); 788 - #endif 789 - 790 - static const struct pv_cpu_ops xen_cpu_ops __initconst = { 791 - .fc = xen_fc, 792 - .thash = xen_thash, 793 - .get_cpuid = xen_get_cpuid, 794 - .get_pmd = xen_get_pmd, 795 - .getreg = xen_getreg, 796 - .setreg = xen_setreg, 797 - .ptcga = xen_ptcga, 798 - .get_rr = xen_get_rr, 799 - .set_rr = xen_set_rr, 800 - .set_rr0_to_rr4 = xen_set_rr0_to_rr4, 801 - .ssm_i = xen_ssm_i, 802 - .rsm_i = xen_rsm_i, 803 - .get_psr_i = xen_get_psr_i, 804 - .intrin_local_irq_restore 805 - = xen_intrin_local_irq_restore, 806 - }; 807 - 808 - /****************************************************************************** 809 - * replacement of hand written assembly codes. 
810 - */ 811 - 812 - extern char xen_switch_to; 813 - extern char xen_leave_syscall; 814 - extern char xen_work_processed_syscall; 815 - extern char xen_leave_kernel; 816 - 817 - const struct pv_cpu_asm_switch xen_cpu_asm_switch = { 818 - .switch_to = (unsigned long)&xen_switch_to, 819 - .leave_syscall = (unsigned long)&xen_leave_syscall, 820 - .work_processed_syscall = (unsigned long)&xen_work_processed_syscall, 821 - .leave_kernel = (unsigned long)&xen_leave_kernel, 822 - }; 823 - 824 - /*************************************************************************** 825 - * pv_iosapic_ops 826 - * iosapic read/write hooks. 827 - */ 828 - static void 829 - xen_pcat_compat_init(void) 830 - { 831 - /* nothing */ 832 - } 833 - 834 - static struct irq_chip* 835 - xen_iosapic_get_irq_chip(unsigned long trigger) 836 - { 837 - return NULL; 838 - } 839 - 840 - static unsigned int 841 - xen_iosapic_read(char __iomem *iosapic, unsigned int reg) 842 - { 843 - struct physdev_apic apic_op; 844 - int ret; 845 - 846 - apic_op.apic_physbase = (unsigned long)iosapic - 847 - __IA64_UNCACHED_OFFSET; 848 - apic_op.reg = reg; 849 - ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); 850 - if (ret) 851 - return ret; 852 - return apic_op.value; 853 - } 854 - 855 - static void 856 - xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) 857 - { 858 - struct physdev_apic apic_op; 859 - 860 - apic_op.apic_physbase = (unsigned long)iosapic - 861 - __IA64_UNCACHED_OFFSET; 862 - apic_op.reg = reg; 863 - apic_op.value = val; 864 - HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); 865 - } 866 - 867 - static struct pv_iosapic_ops xen_iosapic_ops __initdata = { 868 - .pcat_compat_init = xen_pcat_compat_init, 869 - .__get_irq_chip = xen_iosapic_get_irq_chip, 870 - 871 - .__read = xen_iosapic_read, 872 - .__write = xen_iosapic_write, 873 - }; 874 - 875 - /*************************************************************************** 876 - * pv_ops initialization 877 - */ 878 - 879 
- void __init 880 - xen_setup_pv_ops(void) 881 - { 882 - xen_info_init(); 883 - pv_info = xen_info; 884 - pv_init_ops = xen_init_ops; 885 - pv_fsys_data = xen_fsys_data; 886 - pv_patchdata = xen_patchdata; 887 - pv_cpu_ops = xen_cpu_ops; 888 - pv_iosapic_ops = xen_iosapic_ops; 889 - pv_irq_ops = xen_irq_ops; 890 - pv_time_ops = xen_time_ops; 891 - 892 - paravirt_cpu_asm_init(&xen_cpu_asm_switch); 893 - } 894 - 895 - #ifdef ASM_SUPPORTED 896 - /*************************************************************************** 897 - * binary pacthing 898 - * pv_init_ops.patch_bundle 899 - */ 900 - 901 - #define DEFINE_FUNC_GETREG(name, privop) \ 902 - DEFINE_FUNC0(get_ ## name, \ 903 - "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") 904 - 905 - DEFINE_FUNC_GETREG(psr, PSR); 906 - DEFINE_FUNC_GETREG(eflag, EFLAG); 907 - DEFINE_FUNC_GETREG(ivr, IVR); 908 - DEFINE_FUNC_GETREG(tpr, TPR); 909 - 910 - #define DEFINE_FUNC_SET_KR(n) \ 911 - DEFINE_VOID_FUNC0(set_kr ## n, \ 912 - ";;\n" \ 913 - "mov r9 = r8\n" \ 914 - "mov r8 = " #n "\n" \ 915 - "break " __stringify(HYPERPRIVOP_SET_KR) "\n") 916 - 917 - DEFINE_FUNC_SET_KR(0); 918 - DEFINE_FUNC_SET_KR(1); 919 - DEFINE_FUNC_SET_KR(2); 920 - DEFINE_FUNC_SET_KR(3); 921 - DEFINE_FUNC_SET_KR(4); 922 - DEFINE_FUNC_SET_KR(5); 923 - DEFINE_FUNC_SET_KR(6); 924 - DEFINE_FUNC_SET_KR(7); 925 - 926 - #define __DEFINE_FUNC_SETREG(name, privop) \ 927 - DEFINE_VOID_FUNC0(name, \ 928 - "break "__stringify(HYPERPRIVOP_ ## privop) "\n") 929 - 930 - #define DEFINE_FUNC_SETREG(name, privop) \ 931 - __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) 932 - 933 - DEFINE_FUNC_SETREG(eflag, EFLAG); 934 - DEFINE_FUNC_SETREG(tpr, TPR); 935 - __DEFINE_FUNC_SETREG(eoi, EOI); 936 - 937 - extern const char xen_check_events[]; 938 - extern const char __xen_intrin_local_irq_restore_direct_start[]; 939 - extern const char __xen_intrin_local_irq_restore_direct_end[]; 940 - extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; 941 - 942 - asm ( 
943 - ".align 32\n" 944 - ".proc xen_check_events\n" 945 - "xen_check_events:\n" 946 - /* masked = 0 947 - * r9 = masked_addr - 1 948 - * = pending_intr_addr 949 - */ 950 - "st1.rel [r9] = r0, -1\n" 951 - ";;\n" 952 - /* r8 = pending_intr */ 953 - "ld1.acq r11 = [r9]\n" 954 - ";;\n" 955 - /* p9 = interrupt pending? */ 956 - "cmp.ne p9, p10 = r11, r0\n" 957 - ";;\n" 958 - "(p10) mf\n" 959 - /* issue hypercall to trigger interrupt */ 960 - "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" 961 - "br.cond.sptk.many b6\n" 962 - ".endp xen_check_events\n" 963 - "\n" 964 - ".align 32\n" 965 - ".proc __xen_intrin_local_irq_restore_direct\n" 966 - "__xen_intrin_local_irq_restore_direct:\n" 967 - "__xen_intrin_local_irq_restore_direct_start:\n" 968 - "1:\n" 969 - "{\n" 970 - "cmp.ne p6, p7 = r8, r0\n" 971 - "mov r17 = ip\n" /* get ip to calc return address */ 972 - "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" 973 - ";;\n" 974 - "}\n" 975 - "{\n" 976 - /* r9 = XEN_PSR_I_ADDR */ 977 - "ld8 r9 = [r9]\n" 978 - ";;\n" 979 - /* r10 = masked previous value */ 980 - "(p6) ld1.acq r10 = [r9]\n" 981 - "adds r17 = 1f - 1b, r17\n" /* calculate return address */ 982 - ";;\n" 983 - "}\n" 984 - "{\n" 985 - /* p8 = !masked interrupt masked previously? */ 986 - "(p6) cmp.ne.unc p8, p0 = r10, r0\n" 987 - "\n" 988 - /* p7 = else clause */ 989 - "(p7) mov r11 = 1\n" 990 - ";;\n" 991 - "(p8) mov b6 = r17\n" /* set return address */ 992 - "}\n" 993 - "{\n" 994 - /* masked = 1 */ 995 - "(p7) st1.rel [r9] = r11\n" 996 - "\n" 997 - "[99:]\n" 998 - "(p8) brl.cond.dptk.few xen_check_events\n" 999 - "}\n" 1000 - /* pv calling stub is 5 bundles. 
fill nop to adjust return address */ 1001 - "{\n" 1002 - "nop 0\n" 1003 - "nop 0\n" 1004 - "nop 0\n" 1005 - "}\n" 1006 - "1:\n" 1007 - "__xen_intrin_local_irq_restore_direct_end:\n" 1008 - ".endp __xen_intrin_local_irq_restore_direct\n" 1009 - "\n" 1010 - ".align 8\n" 1011 - "__xen_intrin_local_irq_restore_direct_reloc:\n" 1012 - "data8 99b\n" 1013 - ); 1014 - 1015 - static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] 1016 - __initdata_or_module = 1017 - { 1018 - #define XEN_PATCH_BUNDLE_ELEM(name, type) \ 1019 - { \ 1020 - (void*)xen_ ## name ## _direct_start, \ 1021 - (void*)xen_ ## name ## _direct_end, \ 1022 - PARAVIRT_PATCH_TYPE_ ## type, \ 1023 - } 1024 - 1025 - XEN_PATCH_BUNDLE_ELEM(fc, FC), 1026 - XEN_PATCH_BUNDLE_ELEM(thash, THASH), 1027 - XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), 1028 - XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), 1029 - XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), 1030 - XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), 1031 - XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), 1032 - XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), 1033 - XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), 1034 - XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), 1035 - XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), 1036 - { 1037 - (void*)__xen_intrin_local_irq_restore_direct_start, 1038 - (void*)__xen_intrin_local_irq_restore_direct_end, 1039 - PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, 1040 - }, 1041 - 1042 - #define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ 1043 - { \ 1044 - xen_get_ ## name ## _direct_start, \ 1045 - xen_get_ ## name ## _direct_end, \ 1046 - PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ 1047 - } 1048 - 1049 - XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), 1050 - XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), 1051 - 1052 - XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), 1053 - XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), 1054 - 1055 - XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), 1056 - XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), 1057 - 1058 - 1059 - #define 
__XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 1060 - { \ 1061 - xen_ ## name ## _direct_start, \ 1062 - xen_ ## name ## _direct_end, \ 1063 - PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ 1064 - } 1065 - 1066 - #define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 1067 - __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) 1068 - 1069 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), 1070 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), 1071 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), 1072 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), 1073 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), 1074 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), 1075 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), 1076 - XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), 1077 - 1078 - XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), 1079 - XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR), 1080 - __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), 1081 - 1082 - XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), 1083 - XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), 1084 - }; 1085 - 1086 - static unsigned long __init_or_module 1087 - xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) 1088 - { 1089 - const unsigned long nelems = sizeof(xen_patch_bundle_elems) / 1090 - sizeof(xen_patch_bundle_elems[0]); 1091 - unsigned long used; 1092 - const struct paravirt_patch_bundle_elem *found; 1093 - 1094 - used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, 1095 - xen_patch_bundle_elems, nelems, 1096 - &found); 1097 - 1098 - if (found == NULL) 1099 - /* fallback */ 1100 - return ia64_native_patch_bundle(sbundle, ebundle, type); 1101 - if (used == 0) 1102 - return used; 1103 - 1104 - /* relocation */ 1105 - switch (type) { 1106 - case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { 1107 - unsigned long reloc = 1108 - __xen_intrin_local_irq_restore_direct_reloc; 1109 - unsigned long reloc_offset = reloc - (unsigned long) 1110 - __xen_intrin_local_irq_restore_direct_start; 1111 - unsigned long tag = (unsigned long)sbundle + 
reloc_offset; 1112 - paravirt_patch_reloc_brl(tag, xen_check_events); 1113 - break; 1114 - } 1115 - default: 1116 - /* nothing */ 1117 - break; 1118 - } 1119 - return used; 1120 - } 1121 - #endif /* ASM_SUPPOTED */ 1122 - 1123 - const struct paravirt_patch_branch_target xen_branch_target[] 1124 - __initconst = { 1125 - #define PARAVIRT_BR_TARGET(name, type) \ 1126 - { \ 1127 - &xen_ ## name, \ 1128 - PARAVIRT_PATCH_TYPE_BR_ ## type, \ 1129 - } 1130 - PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), 1131 - PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), 1132 - PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), 1133 - PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), 1134 - }; 1135 - 1136 - static void __init 1137 - xen_patch_branch(unsigned long tag, unsigned long type) 1138 - { 1139 - __paravirt_patch_apply_branch(tag, type, xen_branch_target, 1140 - ARRAY_SIZE(xen_branch_target)); 1141 - }
-106
arch/ia64/xen/xencomm.c
··· 1 - /* 2 - * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation; either version 2 of the License, or 7 - * (at your option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write to the Free Software 16 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 - */ 18 - 19 - #include <linux/mm.h> 20 - #include <linux/err.h> 21 - 22 - static unsigned long kernel_virtual_offset; 23 - static int is_xencomm_initialized; 24 - 25 - /* for xen early printk. It uses console io hypercall which uses xencomm. 26 - * However early printk may use it before xencomm initialization. 27 - */ 28 - int 29 - xencomm_is_initialized(void) 30 - { 31 - return is_xencomm_initialized; 32 - } 33 - 34 - void 35 - xencomm_initialize(void) 36 - { 37 - kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START); 38 - is_xencomm_initialized = 1; 39 - } 40 - 41 - /* Translate virtual address to physical address. */ 42 - unsigned long 43 - xencomm_vtop(unsigned long vaddr) 44 - { 45 - struct page *page; 46 - struct vm_area_struct *vma; 47 - 48 - if (vaddr == 0) 49 - return 0UL; 50 - 51 - if (REGION_NUMBER(vaddr) == 5) { 52 - pgd_t *pgd; 53 - pud_t *pud; 54 - pmd_t *pmd; 55 - pte_t *ptep; 56 - 57 - /* On ia64, TASK_SIZE refers to current. It is not initialized 58 - during boot. 59 - Furthermore the kernel is relocatable and __pa() doesn't 60 - work on addresses. 
*/ 61 - if (vaddr >= KERNEL_START 62 - && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) 63 - return vaddr - kernel_virtual_offset; 64 - 65 - /* In kernel area -- virtually mapped. */ 66 - pgd = pgd_offset_k(vaddr); 67 - if (pgd_none(*pgd) || pgd_bad(*pgd)) 68 - return ~0UL; 69 - 70 - pud = pud_offset(pgd, vaddr); 71 - if (pud_none(*pud) || pud_bad(*pud)) 72 - return ~0UL; 73 - 74 - pmd = pmd_offset(pud, vaddr); 75 - if (pmd_none(*pmd) || pmd_bad(*pmd)) 76 - return ~0UL; 77 - 78 - ptep = pte_offset_kernel(pmd, vaddr); 79 - if (!ptep) 80 - return ~0UL; 81 - 82 - return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); 83 - } 84 - 85 - if (vaddr > TASK_SIZE) { 86 - /* percpu variables */ 87 - if (REGION_NUMBER(vaddr) == 7 && 88 - REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS)) 89 - ia64_tpa(vaddr); 90 - 91 - /* kernel address */ 92 - return __pa(vaddr); 93 - } 94 - 95 - /* XXX double-check (lack of) locking */ 96 - vma = find_extend_vma(current->mm, vaddr); 97 - if (!vma) 98 - return ~0UL; 99 - 100 - /* We assume the page is modified. */ 101 - page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); 102 - if (IS_ERR_OR_NULL(page)) 103 - return ~0UL; 104 - 105 - return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); 106 - }
-52
arch/ia64/xen/xenivt.S
··· 1 - /* 2 - * arch/ia64/xen/ivt.S 3 - * 4 - * Copyright (C) 2005 Hewlett-Packard Co 5 - * Dan Magenheimer <dan.magenheimer@hp.com> 6 - * 7 - * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 8 - * VA Linux Systems Japan K.K. 9 - * pv_ops. 10 - */ 11 - 12 - #include <asm/asmmacro.h> 13 - #include <asm/kregs.h> 14 - #include <asm/pgtable.h> 15 - 16 - #include "../kernel/minstate.h" 17 - 18 - .section .text,"ax" 19 - GLOBAL_ENTRY(xen_event_callback) 20 - mov r31=pr // prepare to save predicates 21 - ;; 22 - SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 23 - ;; 24 - movl r3=XSI_PSR_IC 25 - mov r14=1 26 - ;; 27 - st4 [r3]=r14 28 - ;; 29 - adds r3=8,r2 // set up second base pointer for SAVE_REST 30 - srlz.i // ensure everybody knows psr.ic is back on 31 - ;; 32 - SAVE_REST 33 - ;; 34 - 1: 35 - alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group 36 - add out0=16,sp // pass pointer to pt_regs as first arg 37 - ;; 38 - br.call.sptk.many b0=xen_evtchn_do_upcall 39 - ;; 40 - movl r20=XSI_PSR_I_ADDR 41 - ;; 42 - ld8 r20=[r20] 43 - ;; 44 - adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending 45 - ;; 46 - ld1 r20=[r20] 47 - ;; 48 - cmp.ne p6,p0=r20,r0 // if there are pending events, 49 - (p6) br.spnt.few 1b // call evtchn_do_upcall again. 50 - br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is 51 - // paravirtualized as xen_leave_kernel 52 - END(xen_event_callback)
-80
arch/ia64/xen/xensetup.S
··· 1 - /* 2 - * Support routines for Xen 3 - * 4 - * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> 5 - */ 6 - 7 - #include <asm/processor.h> 8 - #include <asm/asmmacro.h> 9 - #include <asm/pgtable.h> 10 - #include <asm/paravirt.h> 11 - #include <asm/xen/privop.h> 12 - #include <linux/elfnote.h> 13 - #include <linux/init.h> 14 - #include <xen/interface/elfnote.h> 15 - 16 - .section .data..read_mostly 17 - .align 8 18 - .global xen_domain_type 19 - xen_domain_type: 20 - data4 XEN_NATIVE_ASM 21 - .previous 22 - 23 - __INIT 24 - ENTRY(startup_xen) 25 - // Calculate load offset. 26 - // The constant, LOAD_OFFSET, can't be used because the boot 27 - // loader doesn't always load to the LMA specified by the vmlinux.lds. 28 - mov r9=ip // must be the first instruction to make sure 29 - // that r9 = the physical address of startup_xen. 30 - // Usually r9 = startup_xen - LOAD_OFFSET 31 - movl r8=startup_xen 32 - ;; 33 - sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET. 34 - 35 - mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN 36 - movl r11=_start 37 - ;; 38 - add r11=r11,r9 39 - movl r8=hypervisor_type 40 - ;; 41 - add r8=r8,r9 42 - mov b0=r11 43 - ;; 44 - st8 [r8]=r10 45 - br.cond.sptk.many b0 46 - ;; 47 - END(startup_xen) 48 - 49 - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") 50 - ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") 51 - ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") 52 - ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET) 53 - 54 - #define isBP p3 // are we the Bootstrap Processor? 55 - 56 - GLOBAL_ENTRY(xen_setup_hook) 57 - mov r8=XEN_PV_DOMAIN_ASM 58 - (isBP) movl r9=xen_domain_type;; 59 - (isBP) st4 [r9]=r8 60 - movl r10=xen_ivt;; 61 - 62 - mov cr.iva=r10 63 - 64 - /* Set xsi base. 
*/ 65 - #define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600 66 - (isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA 67 - (isBP) movl r28=XSI_BASE;; 68 - (isBP) break 0x1000;; 69 - 70 - /* setup pv_ops */ 71 - (isBP) mov r4=rp 72 - ;; 73 - (isBP) br.call.sptk.many rp=xen_setup_pv_ops 74 - ;; 75 - (isBP) mov rp=r4 76 - ;; 77 - 78 - br.ret.sptk.many rp 79 - ;; 80 - END(xen_setup_hook)
+1 -1
include/xen/interface/callback.h
··· 36 36 * @extra_args == Operation-specific extra arguments (NULL if none). 37 37 */ 38 38 39 - /* ia64, x86: Callback for event delivery. */ 39 + /* x86: Callback for event delivery. */ 40 40 #define CALLBACKTYPE_event 0 41 41 42 42 /* x86: Failsafe callback when guest state cannot be restored by Xen. */
-3
include/xen/interface/io/protocols.h
··· 3 3 4 4 #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" 5 5 #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" 6 - #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" 7 6 #define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi" 8 7 #define XEN_IO_PROTO_ABI_ARM "arm-abi" 9 8 ··· 10 11 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 11 12 #elif defined(__x86_64__) 12 13 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 13 - #elif defined(__ia64__) 14 - # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 15 14 #elif defined(__powerpc64__) 16 15 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64 17 16 #elif defined(__arm__) || defined(__aarch64__)