Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'score' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic into for-linus

* 'score' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
score: clean up mm/init.c
score: make irq.h definitions local
score: cleanups: dead code, 0 as pointer, shadowed variables
score: fix function prototypes
score: add address space annotations
score: add missing #includes
score: move save arg5 and arg6 instruction in front of enable_irq
score: add prototypes for wrapped syscalls
score: remove init_mm
score: add generic sys_call_table
score: remove __{put,get}_user_unknown
score: unset __ARCH_WANT_IPC_PARSE_VERSION
score: update files according to review comments
score: add maintainers for score architecture
score: Add support for Sunplus S+core architecture

+8423
+8
MAINTAINERS
··· 4392 4392 F: kernel/sched* 4393 4393 F: include/linux/sched.h 4394 4394 4395 + SCORE ARCHITECTURE 4396 + P: Chen Liqin 4397 + M: liqin.chen@sunplusct.com 4398 + P: Lennox Wu 4399 + M: lennox.wu@sunplusct.com 4400 + W: http://www.sunplusct.com 4401 + S: Supported 4402 + 4395 4403 SCSI CDROM DRIVER 4396 4404 M: Jens Axboe <axboe@kernel.dk> 4397 4405 L: linux-scsi@vger.kernel.org
+141
arch/score/Kconfig
··· 1 + # For a description of the syntax of this configuration file, 2 + # see Documentation/kbuild/kconfig-language.txt. 3 + 4 + mainmenu "Linux/SCORE Kernel Configuration" 5 + 6 + menu "Machine selection" 7 + 8 + choice 9 + prompt "System type" 10 + default MACH_SPCT6600 11 + 12 + config ARCH_SCORE7 13 + bool "SCORE7 processor" 14 + select SYS_SUPPORTS_32BIT_KERNEL 15 + select CPU_SCORE7 16 + select GENERIC_HAS_IOMAP 17 + 18 + config MACH_SPCT6600 19 + bool "SPCT6600 series based machines" 20 + select SYS_SUPPORTS_32BIT_KERNEL 21 + select CPU_SCORE7 22 + select GENERIC_HAS_IOMAP 23 + 24 + config SCORE_SIM 25 + bool "Score simulator" 26 + select SYS_SUPPORTS_32BIT_KERNEL 27 + select CPU_SCORE7 28 + select GENERIC_HAS_IOMAP 29 + endchoice 30 + 31 + endmenu 32 + 33 + config CPU_SCORE7 34 + bool 35 + 36 + config GENERIC_IOMAP 37 + def_bool y 38 + 39 + config NO_DMA 40 + bool 41 + default y 42 + 43 + config RWSEM_GENERIC_SPINLOCK 44 + def_bool y 45 + 46 + config GENERIC_FIND_NEXT_BIT 47 + def_bool y 48 + 49 + config GENERIC_HWEIGHT 50 + def_bool y 51 + 52 + config GENERIC_CALIBRATE_DELAY 53 + def_bool y 54 + 55 + config GENERIC_CLOCKEVENTS 56 + def_bool y 57 + 58 + config GENERIC_TIME 59 + def_bool y 60 + 61 + config SCHED_NO_NO_OMIT_FRAME_POINTER 62 + def_bool y 63 + 64 + config GENERIC_HARDIRQS_NO__DO_IRQ 65 + def_bool y 66 + 67 + config GENERIC_SYSCALL_TABLE 68 + def_bool y 69 + 70 + config SCORE_L1_CACHE_SHIFT 71 + int 72 + default "4" 73 + 74 + menu "Kernel type" 75 + 76 + config 32BIT 77 + def_bool y 78 + 79 + config GENERIC_HARDIRQS 80 + def_bool y 81 + 82 + config ARCH_FLATMEM_ENABLE 83 + def_bool y 84 + 85 + config ARCH_POPULATES_NODE_MAP 86 + def_bool y 87 + 88 + source "mm/Kconfig" 89 + 90 + config MEMORY_START 91 + hex 92 + default 0xa0000000 93 + 94 + source "kernel/time/Kconfig" 95 + source "kernel/Kconfig.hz" 96 + source "kernel/Kconfig.preempt" 97 + 98 + endmenu 99 + 100 + config RWSEM_GENERIC_SPINLOCK 101 + def_bool y 102 + 103 + config 
LOCKDEP_SUPPORT 104 + def_bool y 105 + 106 + config STACKTRACE_SUPPORT 107 + def_bool y 108 + 109 + source "init/Kconfig" 110 + 111 + config PROBE_INITRD_HEADER 112 + bool "Probe initrd header created by addinitrd" 113 + depends on BLK_DEV_INITRD 114 + help 115 + Probe initrd header at the last page of kernel image. 116 + Say Y here if you are using arch/score/boot/addinitrd.c to 117 + add initrd or initramfs image to the kernel image. 118 + Otherwise, say N. 119 + 120 + config MMU 121 + def_bool y 122 + 123 + menu "Executable file formats" 124 + 125 + source "fs/Kconfig.binfmt" 126 + 127 + endmenu 128 + 129 + source "net/Kconfig" 130 + 131 + source "drivers/Kconfig" 132 + 133 + source "fs/Kconfig" 134 + 135 + source "arch/score/Kconfig.debug" 136 + 137 + source "security/Kconfig" 138 + 139 + source "crypto/Kconfig" 140 + 141 + source "lib/Kconfig"
+37
arch/score/Kconfig.debug
··· 1 + menu "Kernel hacking" 2 + 3 + config TRACE_IRQFLAGS_SUPPORT 4 + bool 5 + default y 6 + 7 + source "lib/Kconfig.debug" 8 + 9 + config CMDLINE 10 + string "Default kernel command string" 11 + default "" 12 + help 13 + On some platforms, there is currently no way for the boot loader to 14 + pass arguments to the kernel. For these platforms, you can supply 15 + some command-line options at build time by entering them here. In 16 + other cases you can specify kernel args so that you don't have 17 + to set them up in board prom initialization routines. 18 + 19 + config DEBUG_STACK_USAGE 20 + bool "Enable stack utilization instrumentation" 21 + depends on DEBUG_KERNEL 22 + help 23 + Enables the display of the minimum amount of free stack which each 24 + task has ever had available in the sysrq-T and sysrq-P debug output. 25 + 26 + This option will slow down process creation somewhat. 27 + 28 + config RUNTIME_DEBUG 29 + bool "Enable run-time debugging" 30 + depends on DEBUG_KERNEL 31 + help 32 + If you say Y here, some debugging macros will do run-time checking. 33 + If you say N here, those macros will mostly turn to no-ops. See 34 + include/asm-score/debug.h for debugging macros. 35 + If unsure, say N. 36 + 37 + endmenu
+43
arch/score/Makefile
··· 1 + # 2 + # arch/score/Makefile 3 + # 4 + # This file is subject to the terms and conditions of the GNU General Public 5 + # License. See the file "COPYING" in the main directory of this archive 6 + # for more details. 7 + # 8 + 9 + KBUILD_DEFCONFIG := spct6600_defconfig 10 + CROSS_COMPILE := score-linux- 11 + 12 + # 13 + # CPU-dependent compiler/assembler options for optimization. 14 + # 15 + cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \ 16 + -D__linux__ -ffunction-sections -ffreestanding 17 + 18 + # 19 + # Board-dependent options and extra files 20 + # 21 + KBUILD_AFLAGS += $(cflags-y) 22 + KBUILD_CFLAGS += $(cflags-y) 23 + MODFLAGS += -mlong-calls 24 + LDFLAGS += --oformat elf32-littlescore 25 + LDFLAGS_vmlinux += -G0 -static -nostdlib 26 + 27 + head-y := arch/score/kernel/head.o 28 + libs-y += arch/score/lib/ 29 + core-y += arch/score/kernel/ arch/score/mm/ 30 + 31 + boot := arch/score/boot 32 + 33 + vmlinux.bin: vmlinux 34 + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 35 + 36 + archclean: 37 + @$(MAKE) $(clean)=$(boot) 38 + 39 + define archhelp 40 + echo ' vmlinux.bin - Raw binary boot image' 41 + echo 42 + echo ' These will be default as appropriate for a configured platform.' 43 + endef
+15
arch/score/boot/Makefile
··· 1 + # 2 + # arch/score/boot/Makefile 3 + # 4 + # This file is subject to the terms and conditions of the GNU General Public 5 + # License. See the file "COPYING" in the main directory of this archive 6 + # for more details. 7 + # 8 + 9 + targets := vmlinux.bin 10 + 11 + $(obj)/vmlinux.bin: vmlinux FORCE 12 + $(call if_changed,objcopy) 13 + @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 14 + 15 + clean-files += vmlinux.bin
+717
arch/score/configs/spct6600_defconfig
··· 1 + # 2 + # Automatically generated make config: don't edit 3 + # Linux kernel version: 2.6.30-rc5 4 + # Fri Jun 12 18:57:07 2009 5 + # 6 + 7 + # 8 + # Machine selection 9 + # 10 + # CONFIG_ARCH_SCORE7 is not set 11 + CONFIG_MACH_SPCT6600=y 12 + # CONFIG_SCORE_SIM is not set 13 + CONFIG_CPU_SCORE7=y 14 + CONFIG_GENERIC_IOMAP=y 15 + CONFIG_NO_DMA=y 16 + CONFIG_RWSEM_GENERIC_SPINLOCK=y 17 + CONFIG_GENERIC_FIND_NEXT_BIT=y 18 + CONFIG_GENERIC_HWEIGHT=y 19 + CONFIG_GENERIC_CALIBRATE_DELAY=y 20 + CONFIG_GENERIC_CLOCKEVENTS=y 21 + CONFIG_GENERIC_TIME=y 22 + CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 23 + CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 24 + CONFIG_GENERIC_SYSCALL_TABLE=y 25 + CONFIG_SCORE_L1_CACHE_SHIFT=4 26 + 27 + # 28 + # Kernel type 29 + # 30 + CONFIG_32BIT=y 31 + CONFIG_GENERIC_HARDIRQS=y 32 + CONFIG_ARCH_FLATMEM_ENABLE=y 33 + CONFIG_ARCH_POPULATES_NODE_MAP=y 34 + CONFIG_SELECT_MEMORY_MODEL=y 35 + CONFIG_FLATMEM_MANUAL=y 36 + # CONFIG_DISCONTIGMEM_MANUAL is not set 37 + # CONFIG_SPARSEMEM_MANUAL is not set 38 + CONFIG_FLATMEM=y 39 + CONFIG_FLAT_NODE_MEM_MAP=y 40 + CONFIG_PAGEFLAGS_EXTENDED=y 41 + CONFIG_SPLIT_PTLOCK_CPUS=4 42 + # CONFIG_PHYS_ADDR_T_64BIT is not set 43 + CONFIG_ZONE_DMA_FLAG=0 44 + CONFIG_VIRT_TO_BUS=y 45 + CONFIG_UNEVICTABLE_LRU=y 46 + CONFIG_HAVE_MLOCK=y 47 + CONFIG_HAVE_MLOCKED_PAGE_BIT=y 48 + CONFIG_MEMORY_START=0xa0000000 49 + # CONFIG_NO_HZ is not set 50 + # CONFIG_HIGH_RES_TIMERS is not set 51 + CONFIG_GENERIC_CLOCKEVENTS_BUILD=y 52 + CONFIG_HZ_100=y 53 + # CONFIG_HZ_250 is not set 54 + # CONFIG_HZ_300 is not set 55 + # CONFIG_HZ_1000 is not set 56 + CONFIG_HZ=100 57 + # CONFIG_SCHED_HRTICK is not set 58 + # CONFIG_PREEMPT_NONE is not set 59 + CONFIG_PREEMPT_VOLUNTARY=y 60 + # CONFIG_PREEMPT is not set 61 + CONFIG_LOCKDEP_SUPPORT=y 62 + CONFIG_STACKTRACE_SUPPORT=y 63 + CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 64 + 65 + # 66 + # General setup 67 + # 68 + CONFIG_EXPERIMENTAL=y 69 + CONFIG_BROKEN_ON_SMP=y 70 + 
CONFIG_INIT_ENV_ARG_LIMIT=32 71 + CONFIG_LOCALVERSION="" 72 + # CONFIG_LOCALVERSION_AUTO is not set 73 + CONFIG_SWAP=y 74 + CONFIG_SYSVIPC=y 75 + CONFIG_SYSVIPC_SYSCTL=y 76 + CONFIG_POSIX_MQUEUE=y 77 + CONFIG_POSIX_MQUEUE_SYSCTL=y 78 + CONFIG_BSD_PROCESS_ACCT=y 79 + # CONFIG_BSD_PROCESS_ACCT_V3 is not set 80 + # CONFIG_TASKSTATS is not set 81 + # CONFIG_AUDIT is not set 82 + 83 + # 84 + # RCU Subsystem 85 + # 86 + CONFIG_CLASSIC_RCU=y 87 + # CONFIG_TREE_RCU is not set 88 + # CONFIG_PREEMPT_RCU is not set 89 + # CONFIG_TREE_RCU_TRACE is not set 90 + # CONFIG_PREEMPT_RCU_TRACE is not set 91 + # CONFIG_IKCONFIG is not set 92 + CONFIG_LOG_BUF_SHIFT=12 93 + # CONFIG_GROUP_SCHED is not set 94 + # CONFIG_CGROUPS is not set 95 + CONFIG_SYSFS_DEPRECATED=y 96 + CONFIG_SYSFS_DEPRECATED_V2=y 97 + # CONFIG_RELAY is not set 98 + # CONFIG_NAMESPACES is not set 99 + CONFIG_BLK_DEV_INITRD=y 100 + CONFIG_INITRAMFS_SOURCE="" 101 + CONFIG_RD_GZIP=y 102 + # CONFIG_RD_BZIP2 is not set 103 + # CONFIG_RD_LZMA is not set 104 + # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 105 + CONFIG_SYSCTL=y 106 + CONFIG_ANON_INODES=y 107 + CONFIG_EMBEDDED=y 108 + CONFIG_SYSCTL_SYSCALL=y 109 + # CONFIG_KALLSYMS is not set 110 + # CONFIG_STRIP_ASM_SYMS is not set 111 + # CONFIG_HOTPLUG is not set 112 + CONFIG_PRINTK=y 113 + CONFIG_BUG=y 114 + CONFIG_ELF_CORE=y 115 + CONFIG_BASE_FULL=y 116 + CONFIG_FUTEX=y 117 + CONFIG_EPOLL=y 118 + CONFIG_SIGNALFD=y 119 + CONFIG_TIMERFD=y 120 + CONFIG_EVENTFD=y 121 + CONFIG_SHMEM=y 122 + CONFIG_AIO=y 123 + CONFIG_VM_EVENT_COUNTERS=y 124 + CONFIG_COMPAT_BRK=y 125 + CONFIG_SLAB=y 126 + # CONFIG_SLUB is not set 127 + # CONFIG_SLOB is not set 128 + # CONFIG_PROFILING is not set 129 + # CONFIG_MARKERS is not set 130 + # CONFIG_SLOW_WORK is not set 131 + # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 132 + CONFIG_SLABINFO=y 133 + CONFIG_RT_MUTEXES=y 134 + CONFIG_BASE_SMALL=0 135 + CONFIG_MODULES=y 136 + CONFIG_MODULE_FORCE_LOAD=y 137 + CONFIG_MODULE_UNLOAD=y 138 + 
CONFIG_MODULE_FORCE_UNLOAD=y 139 + # CONFIG_MODVERSIONS is not set 140 + # CONFIG_MODULE_SRCVERSION_ALL is not set 141 + CONFIG_BLOCK=y 142 + CONFIG_LBD=y 143 + # CONFIG_BLK_DEV_BSG is not set 144 + # CONFIG_BLK_DEV_INTEGRITY is not set 145 + 146 + # 147 + # IO Schedulers 148 + # 149 + CONFIG_IOSCHED_NOOP=y 150 + CONFIG_IOSCHED_AS=y 151 + CONFIG_IOSCHED_DEADLINE=y 152 + CONFIG_IOSCHED_CFQ=y 153 + # CONFIG_DEFAULT_AS is not set 154 + # CONFIG_DEFAULT_DEADLINE is not set 155 + CONFIG_DEFAULT_CFQ=y 156 + # CONFIG_DEFAULT_NOOP is not set 157 + CONFIG_DEFAULT_IOSCHED="cfq" 158 + # CONFIG_PROBE_INITRD_HEADER is not set 159 + CONFIG_MMU=y 160 + 161 + # 162 + # Executable file formats 163 + # 164 + CONFIG_BINFMT_ELF=y 165 + # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 166 + # CONFIG_HAVE_AOUT is not set 167 + CONFIG_BINFMT_MISC=y 168 + CONFIG_NET=y 169 + 170 + # 171 + # Networking options 172 + # 173 + # CONFIG_PACKET is not set 174 + CONFIG_UNIX=y 175 + CONFIG_XFRM=y 176 + # CONFIG_XFRM_USER is not set 177 + # CONFIG_XFRM_SUB_POLICY is not set 178 + # CONFIG_XFRM_MIGRATE is not set 179 + # CONFIG_XFRM_STATISTICS is not set 180 + CONFIG_NET_KEY=y 181 + # CONFIG_NET_KEY_MIGRATE is not set 182 + CONFIG_INET=y 183 + CONFIG_IP_MULTICAST=y 184 + # CONFIG_IP_ADVANCED_ROUTER is not set 185 + CONFIG_IP_FIB_HASH=y 186 + # CONFIG_IP_PNP is not set 187 + # CONFIG_NET_IPIP is not set 188 + # CONFIG_NET_IPGRE is not set 189 + # CONFIG_IP_MROUTE is not set 190 + CONFIG_ARPD=y 191 + # CONFIG_SYN_COOKIES is not set 192 + # CONFIG_INET_AH is not set 193 + # CONFIG_INET_ESP is not set 194 + # CONFIG_INET_IPCOMP is not set 195 + # CONFIG_INET_XFRM_TUNNEL is not set 196 + # CONFIG_INET_TUNNEL is not set 197 + CONFIG_INET_XFRM_MODE_TRANSPORT=y 198 + CONFIG_INET_XFRM_MODE_TUNNEL=y 199 + CONFIG_INET_XFRM_MODE_BEET=y 200 + # CONFIG_INET_LRO is not set 201 + CONFIG_INET_DIAG=y 202 + CONFIG_INET_TCP_DIAG=y 203 + # CONFIG_TCP_CONG_ADVANCED is not set 204 + CONFIG_TCP_CONG_CUBIC=y 205 + 
CONFIG_DEFAULT_TCP_CONG="cubic" 206 + # CONFIG_TCP_MD5SIG is not set 207 + # CONFIG_IPV6 is not set 208 + # CONFIG_NETLABEL is not set 209 + # CONFIG_NETWORK_SECMARK is not set 210 + # CONFIG_NETFILTER is not set 211 + # CONFIG_IP_DCCP is not set 212 + # CONFIG_IP_SCTP is not set 213 + # CONFIG_TIPC is not set 214 + # CONFIG_ATM is not set 215 + # CONFIG_BRIDGE is not set 216 + # CONFIG_NET_DSA is not set 217 + # CONFIG_VLAN_8021Q is not set 218 + # CONFIG_DECNET is not set 219 + # CONFIG_LLC2 is not set 220 + # CONFIG_IPX is not set 221 + # CONFIG_ATALK is not set 222 + # CONFIG_X25 is not set 223 + # CONFIG_LAPB is not set 224 + # CONFIG_ECONET is not set 225 + # CONFIG_WAN_ROUTER is not set 226 + # CONFIG_PHONET is not set 227 + # CONFIG_NET_SCHED is not set 228 + # CONFIG_DCB is not set 229 + 230 + # 231 + # Network testing 232 + # 233 + # CONFIG_NET_PKTGEN is not set 234 + # CONFIG_HAMRADIO is not set 235 + # CONFIG_CAN is not set 236 + # CONFIG_IRDA is not set 237 + # CONFIG_BT is not set 238 + # CONFIG_AF_RXRPC is not set 239 + # CONFIG_WIRELESS is not set 240 + # CONFIG_WIMAX is not set 241 + # CONFIG_RFKILL is not set 242 + # CONFIG_NET_9P is not set 243 + 244 + # 245 + # Device Drivers 246 + # 247 + 248 + # 249 + # Generic Driver Options 250 + # 251 + # CONFIG_STANDALONE is not set 252 + # CONFIG_PREVENT_FIRMWARE_BUILD is not set 253 + # CONFIG_SYS_HYPERVISOR is not set 254 + # CONFIG_CONNECTOR is not set 255 + # CONFIG_MTD is not set 256 + # CONFIG_PARPORT is not set 257 + CONFIG_BLK_DEV=y 258 + # CONFIG_BLK_DEV_COW_COMMON is not set 259 + CONFIG_BLK_DEV_LOOP=y 260 + CONFIG_BLK_DEV_CRYPTOLOOP=y 261 + # CONFIG_BLK_DEV_NBD is not set 262 + CONFIG_BLK_DEV_RAM=y 263 + CONFIG_BLK_DEV_RAM_COUNT=1 264 + CONFIG_BLK_DEV_RAM_SIZE=4096 265 + # CONFIG_BLK_DEV_XIP is not set 266 + # CONFIG_CDROM_PKTCDVD is not set 267 + # CONFIG_ATA_OVER_ETH is not set 268 + # CONFIG_MISC_DEVICES is not set 269 + 270 + # 271 + # SCSI device support 272 + # 273 + # CONFIG_RAID_ATTRS 
is not set 274 + # CONFIG_SCSI is not set 275 + # CONFIG_SCSI_DMA is not set 276 + # CONFIG_SCSI_NETLINK is not set 277 + # CONFIG_MD is not set 278 + CONFIG_NETDEVICES=y 279 + CONFIG_COMPAT_NET_DEV_OPS=y 280 + # CONFIG_DUMMY is not set 281 + # CONFIG_BONDING is not set 282 + # CONFIG_MACVLAN is not set 283 + # CONFIG_EQUALIZER is not set 284 + # CONFIG_TUN is not set 285 + # CONFIG_VETH is not set 286 + # CONFIG_NET_ETHERNET is not set 287 + # CONFIG_NETDEV_1000 is not set 288 + # CONFIG_NETDEV_10000 is not set 289 + 290 + # 291 + # Wireless LAN 292 + # 293 + # CONFIG_WLAN_PRE80211 is not set 294 + # CONFIG_WLAN_80211 is not set 295 + 296 + # 297 + # Enable WiMAX (Networking options) to see the WiMAX drivers 298 + # 299 + # CONFIG_WAN is not set 300 + # CONFIG_PPP is not set 301 + # CONFIG_SLIP is not set 302 + # CONFIG_NETCONSOLE is not set 303 + # CONFIG_NETPOLL is not set 304 + # CONFIG_NET_POLL_CONTROLLER is not set 305 + # CONFIG_ISDN is not set 306 + # CONFIG_PHONE is not set 307 + 308 + # 309 + # Input device support 310 + # 311 + CONFIG_INPUT=y 312 + # CONFIG_INPUT_FF_MEMLESS is not set 313 + # CONFIG_INPUT_POLLDEV is not set 314 + 315 + # 316 + # Userland interfaces 317 + # 318 + # CONFIG_INPUT_MOUSEDEV is not set 319 + # CONFIG_INPUT_JOYDEV is not set 320 + # CONFIG_INPUT_EVDEV is not set 321 + # CONFIG_INPUT_EVBUG is not set 322 + 323 + # 324 + # Input Device Drivers 325 + # 326 + # CONFIG_INPUT_KEYBOARD is not set 327 + # CONFIG_INPUT_MOUSE is not set 328 + # CONFIG_INPUT_JOYSTICK is not set 329 + # CONFIG_INPUT_TABLET is not set 330 + # CONFIG_INPUT_TOUCHSCREEN is not set 331 + # CONFIG_INPUT_MISC is not set 332 + 333 + # 334 + # Hardware I/O ports 335 + # 336 + # CONFIG_SERIO is not set 337 + # CONFIG_GAMEPORT is not set 338 + 339 + # 340 + # Character devices 341 + # 342 + CONFIG_VT=y 343 + CONFIG_CONSOLE_TRANSLATIONS=y 344 + CONFIG_VT_CONSOLE=y 345 + CONFIG_HW_CONSOLE=y 346 + # CONFIG_VT_HW_CONSOLE_BINDING is not set 347 + CONFIG_DEVKMEM=y 348 + 
CONFIG_SERIAL_NONSTANDARD=y 349 + # CONFIG_N_HDLC is not set 350 + # CONFIG_RISCOM8 is not set 351 + # CONFIG_SPECIALIX is not set 352 + # CONFIG_RIO is not set 353 + CONFIG_STALDRV=y 354 + 355 + # 356 + # Serial drivers 357 + # 358 + # CONFIG_SERIAL_8250 is not set 359 + 360 + # 361 + # Non-8250 serial port support 362 + # 363 + CONFIG_UNIX98_PTYS=y 364 + # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 365 + CONFIG_LEGACY_PTYS=y 366 + CONFIG_LEGACY_PTY_COUNT=256 367 + # CONFIG_IPMI_HANDLER is not set 368 + # CONFIG_HW_RANDOM is not set 369 + # CONFIG_RTC is not set 370 + # CONFIG_GEN_RTC is not set 371 + # CONFIG_R3964 is not set 372 + CONFIG_RAW_DRIVER=y 373 + CONFIG_MAX_RAW_DEVS=8192 374 + # CONFIG_TCG_TPM is not set 375 + # CONFIG_I2C is not set 376 + # CONFIG_SPI is not set 377 + # CONFIG_W1 is not set 378 + # CONFIG_POWER_SUPPLY is not set 379 + # CONFIG_HWMON is not set 380 + # CONFIG_THERMAL is not set 381 + # CONFIG_THERMAL_HWMON is not set 382 + # CONFIG_WATCHDOG is not set 383 + 384 + # 385 + # Multifunction device drivers 386 + # 387 + # CONFIG_MFD_CORE is not set 388 + # CONFIG_MFD_SM501 is not set 389 + # CONFIG_HTC_PASIC3 is not set 390 + # CONFIG_MFD_TMIO is not set 391 + # CONFIG_REGULATOR is not set 392 + 393 + # 394 + # Multimedia devices 395 + # 396 + 397 + # 398 + # Multimedia core support 399 + # 400 + # CONFIG_VIDEO_DEV is not set 401 + # CONFIG_DVB_CORE is not set 402 + # CONFIG_VIDEO_MEDIA is not set 403 + 404 + # 405 + # Multimedia drivers 406 + # 407 + # CONFIG_DAB is not set 408 + 409 + # 410 + # Graphics support 411 + # 412 + # CONFIG_VGASTATE is not set 413 + # CONFIG_VIDEO_OUTPUT_CONTROL is not set 414 + # CONFIG_FB is not set 415 + # CONFIG_BACKLIGHT_LCD_SUPPORT is not set 416 + 417 + # 418 + # Display device support 419 + # 420 + # CONFIG_DISPLAY_SUPPORT is not set 421 + 422 + # 423 + # Console display driver support 424 + # 425 + # CONFIG_VGA_CONSOLE is not set 426 + CONFIG_DUMMY_CONSOLE=y 427 + # CONFIG_SOUND is not set 428 + # 
CONFIG_HID_SUPPORT is not set 429 + # CONFIG_USB_SUPPORT is not set 430 + # CONFIG_MMC is not set 431 + # CONFIG_MEMSTICK is not set 432 + # CONFIG_NEW_LEDS is not set 433 + # CONFIG_ACCESSIBILITY is not set 434 + # CONFIG_RTC_CLASS is not set 435 + # CONFIG_AUXDISPLAY is not set 436 + # CONFIG_UIO is not set 437 + # CONFIG_STAGING is not set 438 + 439 + # 440 + # File systems 441 + # 442 + CONFIG_EXT2_FS=y 443 + CONFIG_EXT2_FS_XATTR=y 444 + CONFIG_EXT2_FS_POSIX_ACL=y 445 + # CONFIG_EXT2_FS_SECURITY is not set 446 + # CONFIG_EXT2_FS_XIP is not set 447 + CONFIG_EXT3_FS=y 448 + # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 449 + CONFIG_EXT3_FS_XATTR=y 450 + CONFIG_EXT3_FS_POSIX_ACL=y 451 + # CONFIG_EXT3_FS_SECURITY is not set 452 + # CONFIG_EXT4_FS is not set 453 + CONFIG_JBD=y 454 + CONFIG_FS_MBCACHE=y 455 + # CONFIG_REISERFS_FS is not set 456 + # CONFIG_JFS_FS is not set 457 + CONFIG_FS_POSIX_ACL=y 458 + CONFIG_FILE_LOCKING=y 459 + # CONFIG_XFS_FS is not set 460 + # CONFIG_GFS2_FS is not set 461 + # CONFIG_OCFS2_FS is not set 462 + # CONFIG_BTRFS_FS is not set 463 + CONFIG_DNOTIFY=y 464 + CONFIG_INOTIFY=y 465 + CONFIG_INOTIFY_USER=y 466 + # CONFIG_QUOTA is not set 467 + CONFIG_AUTOFS_FS=y 468 + CONFIG_AUTOFS4_FS=y 469 + # CONFIG_FUSE_FS is not set 470 + CONFIG_GENERIC_ACL=y 471 + 472 + # 473 + # Caches 474 + # 475 + # CONFIG_FSCACHE is not set 476 + 477 + # 478 + # CD-ROM/DVD Filesystems 479 + # 480 + # CONFIG_ISO9660_FS is not set 481 + # CONFIG_UDF_FS is not set 482 + 483 + # 484 + # DOS/FAT/NT Filesystems 485 + # 486 + # CONFIG_MSDOS_FS is not set 487 + # CONFIG_VFAT_FS is not set 488 + # CONFIG_NTFS_FS is not set 489 + 490 + # 491 + # Pseudo filesystems 492 + # 493 + CONFIG_PROC_FS=y 494 + CONFIG_PROC_KCORE=y 495 + CONFIG_PROC_SYSCTL=y 496 + # CONFIG_PROC_PAGE_MONITOR is not set 497 + CONFIG_SYSFS=y 498 + CONFIG_TMPFS=y 499 + CONFIG_TMPFS_POSIX_ACL=y 500 + # CONFIG_HUGETLB_PAGE is not set 501 + # CONFIG_CONFIGFS_FS is not set 502 + CONFIG_MISC_FILESYSTEMS=y 503 
+ # CONFIG_ADFS_FS is not set 504 + # CONFIG_AFFS_FS is not set 505 + # CONFIG_ECRYPT_FS is not set 506 + # CONFIG_HFS_FS is not set 507 + # CONFIG_HFSPLUS_FS is not set 508 + # CONFIG_BEFS_FS is not set 509 + # CONFIG_BFS_FS is not set 510 + # CONFIG_EFS_FS is not set 511 + # CONFIG_CRAMFS is not set 512 + # CONFIG_SQUASHFS is not set 513 + # CONFIG_VXFS_FS is not set 514 + # CONFIG_MINIX_FS is not set 515 + # CONFIG_OMFS_FS is not set 516 + # CONFIG_HPFS_FS is not set 517 + # CONFIG_QNX4FS_FS is not set 518 + # CONFIG_ROMFS_FS is not set 519 + # CONFIG_SYSV_FS is not set 520 + # CONFIG_UFS_FS is not set 521 + # CONFIG_NILFS2_FS is not set 522 + CONFIG_NETWORK_FILESYSTEMS=y 523 + CONFIG_NFS_FS=y 524 + CONFIG_NFS_V3=y 525 + CONFIG_NFS_V3_ACL=y 526 + CONFIG_NFS_V4=y 527 + CONFIG_NFSD=y 528 + CONFIG_NFSD_V2_ACL=y 529 + CONFIG_NFSD_V3=y 530 + CONFIG_NFSD_V3_ACL=y 531 + CONFIG_NFSD_V4=y 532 + CONFIG_LOCKD=y 533 + CONFIG_LOCKD_V4=y 534 + CONFIG_EXPORTFS=y 535 + CONFIG_NFS_ACL_SUPPORT=y 536 + CONFIG_NFS_COMMON=y 537 + CONFIG_SUNRPC=y 538 + CONFIG_SUNRPC_GSS=y 539 + CONFIG_RPCSEC_GSS_KRB5=y 540 + # CONFIG_RPCSEC_GSS_SPKM3 is not set 541 + # CONFIG_SMB_FS is not set 542 + # CONFIG_CIFS is not set 543 + # CONFIG_NCP_FS is not set 544 + # CONFIG_CODA_FS is not set 545 + # CONFIG_AFS_FS is not set 546 + 547 + # 548 + # Partition Types 549 + # 550 + # CONFIG_PARTITION_ADVANCED is not set 551 + CONFIG_MSDOS_PARTITION=y 552 + # CONFIG_NLS is not set 553 + # CONFIG_DLM is not set 554 + 555 + # 556 + # Kernel hacking 557 + # 558 + CONFIG_TRACE_IRQFLAGS_SUPPORT=y 559 + # CONFIG_PRINTK_TIME is not set 560 + CONFIG_ENABLE_WARN_DEPRECATED=y 561 + CONFIG_ENABLE_MUST_CHECK=y 562 + CONFIG_FRAME_WARN=1024 563 + # CONFIG_MAGIC_SYSRQ is not set 564 + # CONFIG_UNUSED_SYMBOLS is not set 565 + # CONFIG_DEBUG_FS is not set 566 + # CONFIG_HEADERS_CHECK is not set 567 + # CONFIG_DEBUG_KERNEL is not set 568 + # CONFIG_DEBUG_MEMORY_INIT is not set 569 + # CONFIG_RCU_CPU_STALL_DETECTOR is not set 
570 + # CONFIG_SYSCTL_SYSCALL_CHECK is not set 571 + CONFIG_TRACING_SUPPORT=y 572 + 573 + # 574 + # Tracers 575 + # 576 + # CONFIG_IRQSOFF_TRACER is not set 577 + # CONFIG_SCHED_TRACER is not set 578 + # CONFIG_CONTEXT_SWITCH_TRACER is not set 579 + # CONFIG_EVENT_TRACER is not set 580 + # CONFIG_BOOT_TRACER is not set 581 + # CONFIG_TRACE_BRANCH_PROFILING is not set 582 + # CONFIG_KMEMTRACE is not set 583 + # CONFIG_WORKQUEUE_TRACER is not set 584 + # CONFIG_BLK_DEV_IO_TRACE is not set 585 + # CONFIG_SAMPLES is not set 586 + CONFIG_CMDLINE="" 587 + 588 + # 589 + # Security options 590 + # 591 + CONFIG_KEYS=y 592 + CONFIG_KEYS_DEBUG_PROC_KEYS=y 593 + CONFIG_SECURITY=y 594 + # CONFIG_SECURITYFS is not set 595 + CONFIG_SECURITY_NETWORK=y 596 + # CONFIG_SECURITY_NETWORK_XFRM is not set 597 + # CONFIG_SECURITY_PATH is not set 598 + CONFIG_SECURITY_FILE_CAPABILITIES=y 599 + CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 600 + # CONFIG_SECURITY_TOMOYO is not set 601 + CONFIG_CRYPTO=y 602 + 603 + # 604 + # Crypto core or helper 605 + # 606 + # CONFIG_CRYPTO_FIPS is not set 607 + CONFIG_CRYPTO_ALGAPI=y 608 + CONFIG_CRYPTO_ALGAPI2=y 609 + CONFIG_CRYPTO_AEAD=y 610 + CONFIG_CRYPTO_AEAD2=y 611 + CONFIG_CRYPTO_BLKCIPHER=y 612 + CONFIG_CRYPTO_BLKCIPHER2=y 613 + CONFIG_CRYPTO_HASH=y 614 + CONFIG_CRYPTO_HASH2=y 615 + CONFIG_CRYPTO_RNG=y 616 + CONFIG_CRYPTO_RNG2=y 617 + CONFIG_CRYPTO_PCOMP=y 618 + CONFIG_CRYPTO_MANAGER=y 619 + CONFIG_CRYPTO_MANAGER2=y 620 + # CONFIG_CRYPTO_GF128MUL is not set 621 + CONFIG_CRYPTO_NULL=y 622 + CONFIG_CRYPTO_WORKQUEUE=y 623 + CONFIG_CRYPTO_CRYPTD=y 624 + # CONFIG_CRYPTO_AUTHENC is not set 625 + # CONFIG_CRYPTO_TEST is not set 626 + 627 + # 628 + # Authenticated Encryption with Associated Data 629 + # 630 + # CONFIG_CRYPTO_CCM is not set 631 + # CONFIG_CRYPTO_GCM is not set 632 + CONFIG_CRYPTO_SEQIV=y 633 + 634 + # 635 + # Block modes 636 + # 637 + CONFIG_CRYPTO_CBC=y 638 + # CONFIG_CRYPTO_CTR is not set 639 + # CONFIG_CRYPTO_CTS is not set 640 + # 
CONFIG_CRYPTO_ECB is not set 641 + # CONFIG_CRYPTO_LRW is not set 642 + # CONFIG_CRYPTO_PCBC is not set 643 + # CONFIG_CRYPTO_XTS is not set 644 + 645 + # 646 + # Hash modes 647 + # 648 + # CONFIG_CRYPTO_HMAC is not set 649 + # CONFIG_CRYPTO_XCBC is not set 650 + 651 + # 652 + # Digest 653 + # 654 + CONFIG_CRYPTO_CRC32C=y 655 + CONFIG_CRYPTO_MD4=y 656 + CONFIG_CRYPTO_MD5=y 657 + CONFIG_CRYPTO_MICHAEL_MIC=y 658 + # CONFIG_CRYPTO_RMD128 is not set 659 + # CONFIG_CRYPTO_RMD160 is not set 660 + # CONFIG_CRYPTO_RMD256 is not set 661 + # CONFIG_CRYPTO_RMD320 is not set 662 + # CONFIG_CRYPTO_SHA1 is not set 663 + # CONFIG_CRYPTO_SHA256 is not set 664 + # CONFIG_CRYPTO_SHA512 is not set 665 + # CONFIG_CRYPTO_TGR192 is not set 666 + # CONFIG_CRYPTO_WP512 is not set 667 + 668 + # 669 + # Ciphers 670 + # 671 + # CONFIG_CRYPTO_AES is not set 672 + # CONFIG_CRYPTO_ANUBIS is not set 673 + # CONFIG_CRYPTO_ARC4 is not set 674 + # CONFIG_CRYPTO_BLOWFISH is not set 675 + # CONFIG_CRYPTO_CAMELLIA is not set 676 + # CONFIG_CRYPTO_CAST5 is not set 677 + # CONFIG_CRYPTO_CAST6 is not set 678 + CONFIG_CRYPTO_DES=y 679 + # CONFIG_CRYPTO_FCRYPT is not set 680 + # CONFIG_CRYPTO_KHAZAD is not set 681 + # CONFIG_CRYPTO_SALSA20 is not set 682 + # CONFIG_CRYPTO_SEED is not set 683 + # CONFIG_CRYPTO_SERPENT is not set 684 + # CONFIG_CRYPTO_TEA is not set 685 + # CONFIG_CRYPTO_TWOFISH is not set 686 + 687 + # 688 + # Compression 689 + # 690 + # CONFIG_CRYPTO_DEFLATE is not set 691 + # CONFIG_CRYPTO_ZLIB is not set 692 + # CONFIG_CRYPTO_LZO is not set 693 + 694 + # 695 + # Random Number Generation 696 + # 697 + # CONFIG_CRYPTO_ANSI_CPRNG is not set 698 + # CONFIG_CRYPTO_HW is not set 699 + # CONFIG_BINARY_PRINTF is not set 700 + 701 + # 702 + # Library routines 703 + # 704 + CONFIG_BITREVERSE=y 705 + CONFIG_GENERIC_FIND_LAST_BIT=y 706 + CONFIG_CRC_CCITT=y 707 + CONFIG_CRC16=y 708 + # CONFIG_CRC_T10DIF is not set 709 + # CONFIG_CRC_ITU_T is not set 710 + CONFIG_CRC32=y 711 + # CONFIG_CRC7 is not set 
712 + CONFIG_LIBCRC32C=y 713 + CONFIG_ZLIB_INFLATE=y 714 + CONFIG_DECOMPRESS_GZIP=y 715 + CONFIG_HAS_IOMEM=y 716 + CONFIG_HAS_IOPORT=y 717 + CONFIG_NLATTR=y
+3
arch/score/include/asm/Kbuild
··· 1 + include include/asm-generic/Kbuild.asm 2 + 3 + header-y +=
+161
arch/score/include/asm/asmmacro.h
··· 1 + #ifndef _ASM_SCORE_ASMMACRO_H 2 + #define _ASM_SCORE_ASMMACRO_H 3 + 4 + #include <asm/asm-offsets.h> 5 + 6 + #ifdef __ASSEMBLY__ 7 + 8 + .macro SAVE_ALL 9 + mfcr r30, cr0 10 + mv r31, r0 11 + nop 12 + /* if UMs == 1, change stack. */ 13 + slli.c r30, r30, 28 14 + bpl 1f 15 + la r31, kernelsp 16 + lw r31, [r31] 17 + 1: 18 + mv r30, r0 19 + addri r0, r31, -PT_SIZE 20 + 21 + sw r30, [r0, PT_R0] 22 + .set r1 23 + sw r1, [r0, PT_R1] 24 + .set nor1 25 + sw r2, [r0, PT_R2] 26 + sw r3, [r0, PT_R3] 27 + sw r4, [r0, PT_R4] 28 + sw r5, [r0, PT_R5] 29 + sw r6, [r0, PT_R6] 30 + sw r7, [r0, PT_R7] 31 + 32 + sw r8, [r0, PT_R8] 33 + sw r9, [r0, PT_R9] 34 + sw r10, [r0, PT_R10] 35 + sw r11, [r0, PT_R11] 36 + sw r12, [r0, PT_R12] 37 + sw r13, [r0, PT_R13] 38 + sw r14, [r0, PT_R14] 39 + sw r15, [r0, PT_R15] 40 + 41 + sw r16, [r0, PT_R16] 42 + sw r17, [r0, PT_R17] 43 + sw r18, [r0, PT_R18] 44 + sw r19, [r0, PT_R19] 45 + sw r20, [r0, PT_R20] 46 + sw r21, [r0, PT_R21] 47 + sw r22, [r0, PT_R22] 48 + sw r23, [r0, PT_R23] 49 + 50 + sw r24, [r0, PT_R24] 51 + sw r25, [r0, PT_R25] 52 + sw r25, [r0, PT_R25] 53 + sw r26, [r0, PT_R26] 54 + sw r27, [r0, PT_R27] 55 + 56 + sw r28, [r0, PT_R28] 57 + sw r29, [r0, PT_R29] 58 + orri r28, r0, 0x1fff 59 + li r31, 0x00001fff 60 + xor r28, r28, r31 61 + 62 + mfcehl r30, r31 63 + sw r30, [r0, PT_CEH] 64 + sw r31, [r0, PT_CEL] 65 + 66 + mfcr r31, cr0 67 + sw r31, [r0, PT_PSR] 68 + 69 + mfcr r31, cr1 70 + sw r31, [r0, PT_CONDITION] 71 + 72 + mfcr r31, cr2 73 + sw r31, [r0, PT_ECR] 74 + 75 + mfcr r31, cr5 76 + srli r31, r31, 1 77 + slli r31, r31, 1 78 + sw r31, [r0, PT_EPC] 79 + .endm 80 + 81 + .macro RESTORE_ALL_AND_RET 82 + mfcr r30, cr0 83 + srli r30, r30, 1 84 + slli r30, r30, 1 85 + mtcr r30, cr0 86 + nop 87 + nop 88 + nop 89 + nop 90 + nop 91 + 92 + .set r1 93 + ldis r1, 0x00ff 94 + and r30, r30, r1 95 + not r1, r1 96 + lw r31, [r0, PT_PSR] 97 + and r31, r31, r1 98 + .set nor1 99 + or r31, r31, r30 100 + mtcr r31, cr0 101 + nop 102 + nop 103 + 
nop 104 + nop 105 + nop 106 + 107 + lw r30, [r0, PT_CONDITION] 108 + mtcr r30, cr1 109 + nop 110 + nop 111 + nop 112 + nop 113 + nop 114 + 115 + lw r30, [r0, PT_CEH] 116 + lw r31, [r0, PT_CEL] 117 + mtcehl r30, r31 118 + 119 + .set r1 120 + lw r1, [r0, PT_R1] 121 + .set nor1 122 + lw r2, [r0, PT_R2] 123 + lw r3, [r0, PT_R3] 124 + lw r4, [r0, PT_R4] 125 + lw r5, [r0, PT_R5] 126 + lw r6, [r0, PT_R6] 127 + lw r7, [r0, PT_R7] 128 + 129 + lw r8, [r0, PT_R8] 130 + lw r9, [r0, PT_R9] 131 + lw r10, [r0, PT_R10] 132 + lw r11, [r0, PT_R11] 133 + lw r12, [r0, PT_R12] 134 + lw r13, [r0, PT_R13] 135 + lw r14, [r0, PT_R14] 136 + lw r15, [r0, PT_R15] 137 + 138 + lw r16, [r0, PT_R16] 139 + lw r17, [r0, PT_R17] 140 + lw r18, [r0, PT_R18] 141 + lw r19, [r0, PT_R19] 142 + lw r20, [r0, PT_R20] 143 + lw r21, [r0, PT_R21] 144 + lw r22, [r0, PT_R22] 145 + lw r23, [r0, PT_R23] 146 + 147 + lw r24, [r0, PT_R24] 148 + lw r25, [r0, PT_R25] 149 + lw r26, [r0, PT_R26] 150 + lw r27, [r0, PT_R27] 151 + lw r28, [r0, PT_R28] 152 + lw r29, [r0, PT_R29] 153 + 154 + lw r30, [r0, PT_EPC] 155 + lw r0, [r0, PT_R0] 156 + mtcr r30, cr5 157 + rte 158 + .endm 159 + 160 + #endif /* __ASSEMBLY__ */ 161 + #endif /* _ASM_SCORE_ASMMACRO_H */
+6
arch/score/include/asm/atomic.h
··· 1 + #ifndef _ASM_SCORE_ATOMIC_H 2 + #define _ASM_SCORE_ATOMIC_H 3 + 4 + #include <asm-generic/atomic.h> 5 + 6 + #endif /* _ASM_SCORE_ATOMIC_H */
+4
arch/score/include/asm/auxvec.h
··· 1 + #ifndef _ASM_SCORE_AUXVEC_H 2 + #define _ASM_SCORE_AUXVEC_H 3 + 4 + #endif /* _ASM_SCORE_AUXVEC_H */
+16
arch/score/include/asm/bitops.h
··· 1 + #ifndef _ASM_SCORE_BITOPS_H 2 + #define _ASM_SCORE_BITOPS_H 3 + 4 + #include <asm/byteorder.h> /* swab32 */ 5 + #include <asm/system.h> /* save_flags */ 6 + 7 + /* 8 + * clear_bit() doesn't provide any barrier for the compiler. 9 + */ 10 + #define smp_mb__before_clear_bit() barrier() 11 + #define smp_mb__after_clear_bit() barrier() 12 + 13 + #include <asm-generic/bitops.h> 14 + #include <asm-generic/bitops/__fls.h> 15 + 16 + #endif /* _ASM_SCORE_BITOPS_H */
+6
arch/score/include/asm/bitsperlong.h
··· 1 + #ifndef _ASM_SCORE_BITSPERLONG_H 2 + #define _ASM_SCORE_BITSPERLONG_H 3 + 4 + #include <asm-generic/bitsperlong.h> 5 + 6 + #endif /* _ASM_SCORE_BITSPERLONG_H */
+6
arch/score/include/asm/bug.h
··· 1 + #ifndef _ASM_SCORE_BUG_H 2 + #define _ASM_SCORE_BUG_H 3 + 4 + #include <asm-generic/bug.h> 5 + 6 + #endif /* _ASM_SCORE_BUG_H */
+6
arch/score/include/asm/bugs.h
··· 1 + #ifndef _ASM_SCORE_BUGS_H 2 + #define _ASM_SCORE_BUGS_H 3 + 4 + #include <asm-generic/bugs.h> 5 + 6 + #endif /* _ASM_SCORE_BUGS_H */
+6
arch/score/include/asm/byteorder.h
··· 1 + #ifndef _ASM_SCORE_BYTEORDER_H 2 + #define _ASM_SCORE_BYTEORDER_H 3 + 4 + #include <linux/byteorder/little_endian.h> 5 + 6 + #endif /* _ASM_SCORE_BYTEORDER_H */
+7
arch/score/include/asm/cache.h
··· 1 + #ifndef _ASM_SCORE_CACHE_H 2 + #define _ASM_SCORE_CACHE_H 3 + 4 + #define L1_CACHE_SHIFT 4 5 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 6 + 7 + #endif /* _ASM_SCORE_CACHE_H */
+45
arch/score/include/asm/cacheflush.h
··· 1 + #ifndef _ASM_SCORE_CACHEFLUSH_H 2 + #define _ASM_SCORE_CACHEFLUSH_H 3 + 4 + /* Keep includes the same across arches. */ 5 + #include <linux/mm.h> 6 + 7 + extern void flush_cache_all(void); 8 + extern void flush_cache_mm(struct mm_struct *mm); 9 + extern void flush_cache_range(struct vm_area_struct *vma, 10 + unsigned long start, unsigned long end); 11 + extern void flush_cache_page(struct vm_area_struct *vma, 12 + unsigned long page, unsigned long pfn); 13 + extern void flush_cache_sigtramp(unsigned long addr); 14 + extern void flush_icache_all(void); 15 + extern void flush_icache_range(unsigned long start, unsigned long end); 16 + extern void flush_dcache_range(unsigned long start, unsigned long end); 17 + 18 + #define flush_cache_dup_mm(mm) do {} while (0) 19 + #define flush_dcache_page(page) do {} while (0) 20 + #define flush_dcache_mmap_lock(mapping) do {} while (0) 21 + #define flush_dcache_mmap_unlock(mapping) do {} while (0) 22 + #define flush_cache_vmap(start, end) do {} while (0) 23 + #define flush_cache_vunmap(start, end) do {} while (0) 24 + 25 + static inline void flush_icache_page(struct vm_area_struct *vma, 26 + struct page *page) 27 + { 28 + if (vma->vm_flags & VM_EXEC) { 29 + void *v = page_address(page); 30 + flush_icache_range((unsigned long) v, 31 + (unsigned long) v + PAGE_SIZE); 32 + } 33 + } 34 + 35 + #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 36 + memcpy(dst, src, len) 37 + 38 + #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 39 + do { \ 40 + memcpy(dst, src, len); \ 41 + if ((vma->vm_flags & VM_EXEC)) \ 42 + flush_cache_page(vma, vaddr, page_to_pfn(page));\ 43 + } while (0) 44 + 45 + #endif /* _ASM_SCORE_CACHEFLUSH_H */
+235
arch/score/include/asm/checksum.h
··· 1 + #ifndef _ASM_SCORE_CHECKSUM_H 2 + #define _ASM_SCORE_CHECKSUM_H 3 + 4 + #include <linux/in6.h> 5 + #include <asm/uaccess.h> 6 + 7 + /* 8 + * computes the checksum of a memory block at buff, length len, 9 + * and adds in "sum" (32-bit) 10 + * 11 + * returns a 32-bit number suitable for feeding into itself 12 + * or csum_tcpudp_magic 13 + * 14 + * this function must be called with even lengths, except 15 + * for the last fragment, which may be odd 16 + * 17 + * it's best to have buff aligned on a 32-bit boundary 18 + */ 19 + unsigned int csum_partial(const void *buff, int len, __wsum sum); 20 + unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, 21 + unsigned int sum, int *csum_err); 22 + unsigned int csum_partial_copy(const char *src, char *dst, 23 + int len, unsigned int sum); 24 + 25 + /* 26 + * this is a new version of the above that records errors it finds in *errp, 27 + * but continues and zeros the rest of the buffer. 28 + */ 29 + 30 + /* 31 + * Copy and checksum to user 32 + */ 33 + #define HAVE_CSUM_COPY_USER 34 + static inline 35 + __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, 36 + __wsum sum, int *err_ptr) 37 + { 38 + sum = csum_partial(src, len, sum); 39 + if (copy_to_user(dst, src, len)) { 40 + *err_ptr = -EFAULT; 41 + return (__force __wsum) -1; /* invalid checksum */ 42 + } 43 + return sum; 44 + } 45 + 46 + 47 + #define csum_partial_copy_nocheck csum_partial_copy 48 + /* 49 + * Fold a partial checksum without adding pseudo headers 50 + */ 51 + 52 + static inline __sum16 csum_fold(__wsum sum) 53 + { 54 + /* the while loop is unnecessary really, it's always enough with two 55 + iterations */ 56 + __asm__ __volatile__( 57 + ".set volatile\n\t" 58 + ".set\tr1\n\t" 59 + "slli\tr1,%0, 16\n\t" 60 + "add\t%0,%0, r1\n\t" 61 + "cmp.c\tr1, %0\n\t" 62 + "srli\t%0, %0, 16\n\t" 63 + "bleu\t1f\n\t" 64 + "addi\t%0, 0x1\n\t" 65 + "1:ldi\tr30, 0xffff\n\t" 66 + "xor\t%0, %0, r30\n\t" 67 + "slli\t%0, %0, 16\n\t" 
68 + "srli\t%0, %0, 16\n\t" 69 + ".set\tnor1\n\t" 70 + ".set optimize\n\t" 71 + : "=r" (sum) 72 + : "0" (sum)); 73 + return sum; 74 + } 75 + 76 + /* 77 + * This is a version of ip_compute_csum() optimized for IP headers, 78 + * which always checksum on 4 octet boundaries. 79 + * 80 + * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by 81 + * Arnt Gulbrandsen. 82 + */ 83 + static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 84 + { 85 + unsigned int sum; 86 + unsigned long dummy; 87 + 88 + __asm__ __volatile__( 89 + ".set volatile\n\t" 90 + ".set\tnor1\n\t" 91 + "lw\t%0, [%1]\n\t" 92 + "subri\t%2, %2, 4\n\t" 93 + "slli\t%2, %2, 2\n\t" 94 + "lw\t%3, [%1, 4]\n\t" 95 + "add\t%2, %2, %1\n\t" 96 + "add\t%0, %0, %3\n\t" 97 + "cmp.c\t%3, %0\n\t" 98 + "lw\t%3, [%1, 8]\n\t" 99 + "bleu\t1f\n\t" 100 + "addi\t%0, 0x1\n\t" 101 + "1:\n\t" 102 + "add\t%0, %0, %3\n\t" 103 + "cmp.c\t%3, %0\n\t" 104 + "lw\t%3, [%1, 12]\n\t" 105 + "bleu\t1f\n\t" 106 + "addi\t%0, 0x1\n\t" 107 + "1:add\t%0, %0, %3\n\t" 108 + "cmp.c\t%3, %0\n\t" 109 + "bleu\t1f\n\t" 110 + "addi\t%0, 0x1\n" 111 + 112 + "1:\tlw\t%3, [%1, 16]\n\t" 113 + "addi\t%1, 4\n\t" 114 + "add\t%0, %0, %3\n\t" 115 + "cmp.c\t%3, %0\n\t" 116 + "bleu\t2f\n\t" 117 + "addi\t%0, 0x1\n" 118 + "2:cmp.c\t%2, %1\n\t" 119 + "bne\t1b\n\t" 120 + 121 + ".set\tr1\n\t" 122 + ".set optimize\n\t" 123 + : "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy) 124 + : "1" (iph), "2" (ihl)); 125 + 126 + return csum_fold(sum); 127 + } 128 + 129 + static inline __wsum 130 + csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, 131 + unsigned short proto, __wsum sum) 132 + { 133 + unsigned long tmp = (ntohs(len) << 16) + proto * 256; 134 + __asm__ __volatile__( 135 + ".set volatile\n\t" 136 + "add\t%0, %0, %2\n\t" 137 + "cmp.c\t%2, %0\n\t" 138 + "bleu\t1f\n\t" 139 + "addi\t%0, 0x1\n\t" 140 + "1:\n\t" 141 + "add\t%0, %0, %3\n\t" 142 + "cmp.c\t%3, %0\n\t" 143 + "bleu\t1f\n\t" 144 + "addi\t%0, 0x1\n\t" 145 + "1:\n\t" 146 + 
"add\t%0, %0, %4\n\t" 147 + "cmp.c\t%4, %0\n\t" 148 + "bleu\t1f\n\t" 149 + "addi\t%0, 0x1\n\t" 150 + "1:\n\t" 151 + ".set optimize\n\t" 152 + : "=r" (sum) 153 + : "0" (daddr), "r"(saddr), 154 + "r" (tmp), 155 + "r" (sum)); 156 + return sum; 157 + } 158 + 159 + /* 160 + * computes the checksum of the TCP/UDP pseudo-header 161 + * returns a 16-bit checksum, already complemented 162 + */ 163 + static inline __sum16 164 + csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, 165 + unsigned short proto, __wsum sum) 166 + { 167 + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); 168 + } 169 + 170 + /* 171 + * this routine is used for miscellaneous IP-like checksums, mainly 172 + * in icmp.c 173 + */ 174 + 175 + static inline unsigned short ip_compute_csum(const void *buff, int len) 176 + { 177 + return csum_fold(csum_partial(buff, len, 0)); 178 + } 179 + 180 + #define _HAVE_ARCH_IPV6_CSUM 181 + static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, 182 + const struct in6_addr *daddr, 183 + __u32 len, unsigned short proto, 184 + __wsum sum) 185 + { 186 + __asm__ __volatile__( 187 + ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" 188 + ".set\tnoat\n\t" 189 + "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" 190 + "sltu\t$1, %0, %5\n\t" 191 + "addu\t%0, $1\n\t" 192 + "addu\t%0, %6\t\t\t# csum\n\t" 193 + "sltu\t$1, %0, %6\n\t" 194 + "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" 195 + "addu\t%0, $1\n\t" 196 + "addu\t%0, %1\n\t" 197 + "sltu\t$1, %0, %1\n\t" 198 + "lw\t%1, 4(%2)\n\t" 199 + "addu\t%0, $1\n\t" 200 + "addu\t%0, %1\n\t" 201 + "sltu\t$1, %0, %1\n\t" 202 + "lw\t%1, 8(%2)\n\t" 203 + "addu\t%0, $1\n\t" 204 + "addu\t%0, %1\n\t" 205 + "sltu\t$1, %0, %1\n\t" 206 + "lw\t%1, 12(%2)\n\t" 207 + "addu\t%0, $1\n\t" 208 + "addu\t%0, %1\n\t" 209 + "sltu\t$1, %0, %1\n\t" 210 + "lw\t%1, 0(%3)\n\t" 211 + "addu\t%0, $1\n\t" 212 + "addu\t%0, %1\n\t" 213 + "sltu\t$1, %0, %1\n\t" 214 + "lw\t%1, 4(%3)\n\t" 215 + "addu\t%0, 
$1\n\t" 216 + "addu\t%0, %1\n\t" 217 + "sltu\t$1, %0, %1\n\t" 218 + "lw\t%1, 8(%3)\n\t" 219 + "addu\t%0, $1\n\t" 220 + "addu\t%0, %1\n\t" 221 + "sltu\t$1, %0, %1\n\t" 222 + "lw\t%1, 12(%3)\n\t" 223 + "addu\t%0, $1\n\t" 224 + "addu\t%0, %1\n\t" 225 + "sltu\t$1, %0, %1\n\t" 226 + "addu\t%0, $1\t\t\t# Add final carry\n\t" 227 + ".set\tnoat\n\t" 228 + ".set\tnoreorder" 229 + : "=r" (sum), "=r" (proto) 230 + : "r" (saddr), "r" (daddr), 231 + "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); 232 + 233 + return csum_fold(sum); 234 + } 235 + #endif /* _ASM_SCORE_CHECKSUM_H */
+6
arch/score/include/asm/cputime.h
··· 1 + #ifndef _ASM_SCORE_CPUTIME_H 2 + #define _ASM_SCORE_CPUTIME_H 3 + 4 + #include <asm-generic/cputime.h> 5 + 6 + #endif /* _ASM_SCORE_CPUTIME_H */
+6
arch/score/include/asm/current.h
··· 1 + #ifndef _ASM_SCORE_CURRENT_H 2 + #define _ASM_SCORE_CURRENT_H 3 + 4 + #include <asm-generic/current.h> 5 + 6 + #endif /* _ASM_SCORE_CURRENT_H */
+26
arch/score/include/asm/delay.h
··· 1 + #ifndef _ASM_SCORE_DELAY_H 2 + #define _ASM_SCORE_DELAY_H 3 + 4 + static inline void __delay(unsigned long loops) 5 + { 6 + /* 3 cycles per loop. */ 7 + __asm__ __volatile__ ( 8 + "1:\tsubi\t%0, 3\n\t" 9 + "cmpz.c\t%0\n\t" 10 + "ble\t1b\n\t" 11 + : "=r" (loops) 12 + : "0" (loops)); 13 + } 14 + 15 + static inline void __udelay(unsigned long usecs) 16 + { 17 + unsigned long loops_per_usec; 18 + 19 + loops_per_usec = (loops_per_jiffy * HZ) / 1000000; 20 + 21 + __delay(usecs * loops_per_usec); 22 + } 23 + 24 + #define udelay(usecs) __udelay(usecs) 25 + 26 + #endif /* _ASM_SCORE_DELAY_H */
+6
arch/score/include/asm/device.h
··· 1 + #ifndef _ASM_SCORE_DEVICE_H 2 + #define _ASM_SCORE_DEVICE_H 3 + 4 + #include <asm-generic/device.h> 5 + 6 + #endif /* _ASM_SCORE_DEVICE_H */
+6
arch/score/include/asm/div64.h
··· 1 + #ifndef _ASM_SCORE_DIV64_H 2 + #define _ASM_SCORE_DIV64_H 3 + 4 + #include <asm-generic/div64.h> 5 + 6 + #endif /* _ASM_SCORE_DIV64_H */
+6
arch/score/include/asm/dma-mapping.h
··· 1 + #ifndef _ASM_SCORE_DMA_MAPPING_H 2 + #define _ASM_SCORE_DMA_MAPPING_H 3 + 4 + #include <asm-generic/dma-mapping-broken.h> 5 + 6 + #endif /* _ASM_SCORE_DMA_MAPPING_H */
+8
arch/score/include/asm/dma.h
··· 1 + #ifndef _ASM_SCORE_DMA_H 2 + #define _ASM_SCORE_DMA_H 3 + 4 + #include <asm/io.h> 5 + 6 + #define MAX_DMA_ADDRESS (0) 7 + 8 + #endif /* _ASM_SCORE_DMA_H */
+99
arch/score/include/asm/elf.h
··· 1 + #ifndef _ASM_SCORE_ELF_H 2 + #define _ASM_SCORE_ELF_H 3 + 4 + /* ELF register definitions */ 5 + #define ELF_NGREG 45 6 + #define ELF_NFPREG 33 7 + #define EM_SCORE7 135 8 + 9 + /* Relocation types. */ 10 + #define R_SCORE_NONE 0 11 + #define R_SCORE_HI16 1 12 + #define R_SCORE_LO16 2 13 + #define R_SCORE_BCMP 3 14 + #define R_SCORE_24 4 15 + #define R_SCORE_PC19 5 16 + #define R_SCORE16_11 6 17 + #define R_SCORE16_PC8 7 18 + #define R_SCORE_ABS32 8 19 + #define R_SCORE_ABS16 9 20 + #define R_SCORE_DUMMY2 10 21 + #define R_SCORE_GP15 11 22 + #define R_SCORE_GNU_VTINHERIT 12 23 + #define R_SCORE_GNU_VTENTRY 13 24 + #define R_SCORE_GOT15 14 25 + #define R_SCORE_GOT_LO16 15 26 + #define R_SCORE_CALL15 16 27 + #define R_SCORE_GPREL32 17 28 + #define R_SCORE_REL32 18 29 + #define R_SCORE_DUMMY_HI16 19 30 + #define R_SCORE_IMM30 20 31 + #define R_SCORE_IMM32 21 32 + 33 + typedef unsigned long elf_greg_t; 34 + typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 35 + 36 + typedef double elf_fpreg_t; 37 + typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; 38 + 39 + #define elf_check_arch(x) ((x)->e_machine == EM_SCORE7) 40 + 41 + /* 42 + * These are used to set parameters in the core dumps. 43 + */ 44 + #define ELF_CLASS ELFCLASS32 45 + 46 + /* 47 + * These are used to set parameters in the core dumps. 48 + */ 49 + #define ELF_DATA ELFDATA2LSB 50 + #define ELF_ARCH EM_SCORE7 51 + 52 + #define SET_PERSONALITY(ex) \ 53 + do { \ 54 + set_personality(PER_LINUX); \ 55 + } while (0) 56 + 57 + struct task_struct; 58 + struct pt_regs; 59 + 60 + #define USE_ELF_CORE_DUMP 61 + #define ELF_EXEC_PAGESIZE PAGE_SIZE 62 + 63 + /* This yields a mask that user programs can use to figure out what 64 + instruction set this cpu supports. This could be done in userspace, 65 + but it's not easy, and we've already done it here. */ 66 + 67 + #define ELF_HWCAP (0) 68 + 69 + /* This yields a string that ld.so will use to load implementation 70 + specific libraries for optimization. 
This is more specific in 71 + intent than poking at uname or /proc/cpuinfo. 72 + 73 + For the moment, we have only optimizations for the Intel generations, 74 + but that could change... */ 75 + 76 + #define ELF_PLATFORM (NULL) 77 + 78 + #define ELF_PLAT_INIT(_r, load_addr) \ 79 + do { \ 80 + _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ 81 + _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ 82 + _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \ 83 + _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ 84 + _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ 85 + _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ 86 + _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \ 87 + _r->regs[30] = _r->regs[31] = 0; \ 88 + } while (0) 89 + 90 + /* This is the location that an ET_DYN program is loaded if exec'ed. Typical 91 + use of this is to invoke "./ld.so someprog" to test out a new version of 92 + the loader. We need to make sure that it is out of the way of the program 93 + that it will "exec", and that there is sufficient room for the brk. */ 94 + 95 + #ifndef ELF_ET_DYN_BASE 96 + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) 97 + #endif 98 + 99 + #endif /* _ASM_SCORE_ELF_H */
+6
arch/score/include/asm/emergency-restart.h
··· 1 + #ifndef _ASM_SCORE_EMERGENCY_RESTART_H 2 + #define _ASM_SCORE_EMERGENCY_RESTART_H 3 + 4 + #include <asm-generic/emergency-restart.h> 5 + 6 + #endif /* _ASM_SCORE_EMERGENCY_RESTART_H */
+6
arch/score/include/asm/errno.h
··· 1 + #ifndef _ASM_SCORE_ERRNO_H 2 + #define _ASM_SCORE_ERRNO_H 3 + 4 + #include <asm-generic/errno.h> 5 + 6 + #endif /* _ASM_SCORE_ERRNO_H */
+6
arch/score/include/asm/fcntl.h
··· 1 + #ifndef _ASM_SCORE_FCNTL_H 2 + #define _ASM_SCORE_FCNTL_H 3 + 4 + #include <asm-generic/fcntl.h> 5 + 6 + #endif /* _ASM_SCORE_FCNTL_H */
+82
arch/score/include/asm/fixmap.h
··· 1 + #ifndef _ASM_SCORE_FIXMAP_H 2 + #define _ASM_SCORE_FIXMAP_H 3 + 4 + #include <asm/page.h> 5 + 6 + #define PHY_RAM_BASE 0x00000000 7 + #define PHY_IO_BASE 0x10000000 8 + 9 + #define VIRTUAL_RAM_BASE 0xa0000000 10 + #define VIRTUAL_IO_BASE 0xb0000000 11 + 12 + #define RAM_SPACE_SIZE 0x10000000 13 + #define IO_SPACE_SIZE 0x10000000 14 + 15 + /* Kernel unmapped, cached 512MB */ 16 + #define KSEG1 0xa0000000 17 + 18 + /* 19 + * Here we define all the compile-time 'special' virtual 20 + * addresses. The point is to have a constant address at 21 + * compile time, but to set the physical address only 22 + * in the boot process. We allocate these special addresses 23 + * from the end of virtual memory (0xfffff000) backwards. 24 + * Also this lets us do fail-safe vmalloc(), we 25 + * can guarantee that these special addresses and 26 + * vmalloc()-ed addresses never overlap. 27 + * 28 + * these 'compile-time allocated' memory buffers are 29 + * fixed-size 4k pages. (or larger if used with an increment 30 + * highger than 1) use fixmap_set(idx,phys) to associate 31 + * physical memory with fixmap indices. 32 + * 33 + * TLB entries of such buffers will not be flushed across 34 + * task switches. 35 + */ 36 + 37 + /* 38 + * on UP currently we will have no trace of the fixmap mechanizm, 39 + * no page table allocations, etc. This might change in the 40 + * future, say framebuffers for the console driver(s) could be 41 + * fix-mapped? 42 + */ 43 + enum fixed_addresses { 44 + #define FIX_N_COLOURS 8 45 + FIX_CMAP_BEGIN, 46 + FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, 47 + __end_of_fixed_addresses 48 + }; 49 + 50 + /* 51 + * used by vmalloc.c. 52 + * 53 + * Leave one empty page between vmalloc'ed areas and 54 + * the start of the fixmap, and leave one page empty 55 + * at the top of mem.. 
56 + */ 57 + #define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000) 58 + #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 59 + #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 60 + 61 + #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) 62 + #define __virt_to_fix(x) \ 63 + ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT) 64 + 65 + extern void __this_fixmap_does_not_exist(void); 66 + 67 + /* 68 + * 'index to address' translation. If anyone tries to use the idx 69 + * directly without tranlation, we catch the bug with a NULL-deference 70 + * kernel oops. Illegal ranges of incoming indices are caught too. 71 + */ 72 + static inline unsigned long fix_to_virt(const unsigned int idx) 73 + { 74 + return __fix_to_virt(idx); 75 + } 76 + 77 + static inline unsigned long virt_to_fix(const unsigned long vaddr) 78 + { 79 + return __virt_to_fix(vaddr); 80 + } 81 + 82 + #endif /* _ASM_SCORE_FIXMAP_H */
+4
arch/score/include/asm/ftrace.h
··· 1 + #ifndef _ASM_SCORE_FTRACE_H 2 + #define _ASM_SCORE_FTRACE_H 3 + 4 + #endif /* _ASM_SCORE_FTRACE_H */
+6
arch/score/include/asm/futex.h
··· 1 + #ifndef _ASM_SCORE_FUTEX_H 2 + #define _ASM_SCORE_FUTEX_H 3 + 4 + #include <asm-generic/futex.h> 5 + 6 + #endif /* _ASM_SCORE_FUTEX_H */
+6
arch/score/include/asm/hardirq.h
··· 1 + #ifndef _ASM_SCORE_HARDIRQ_H 2 + #define _ASM_SCORE_HARDIRQ_H 3 + 4 + #include <asm-generic/hardirq.h> 5 + 6 + #endif /* _ASM_SCORE_HARDIRQ_H */
+4
arch/score/include/asm/hw_irq.h
··· 1 + #ifndef _ASM_SCORE_HW_IRQ_H 2 + #define _ASM_SCORE_HW_IRQ_H 3 + 4 + #endif /* _ASM_SCORE_HW_IRQ_H */
+9
arch/score/include/asm/io.h
··· 1 + #ifndef _ASM_SCORE_IO_H 2 + #define _ASM_SCORE_IO_H 3 + 4 + #include <asm-generic/io.h> 5 + 6 + #define virt_to_bus virt_to_phys 7 + #define bus_to_virt phys_to_virt 8 + 9 + #endif /* _ASM_SCORE_IO_H */
+6
arch/score/include/asm/ioctl.h
··· 1 + #ifndef _ASM_SCORE_IOCTL_H 2 + #define _ASM_SCORE_IOCTL_H 3 + 4 + #include <asm-generic/ioctl.h> 5 + 6 + #endif /* _ASM_SCORE_IOCTL_H */
+6
arch/score/include/asm/ioctls.h
··· 1 + #ifndef _ASM_SCORE_IOCTLS_H 2 + #define _ASM_SCORE_IOCTLS_H 3 + 4 + #include <asm-generic/ioctls.h> 5 + 6 + #endif /* _ASM_SCORE_IOCTLS_H */
+6
arch/score/include/asm/ipcbuf.h
··· 1 + #ifndef _ASM_SCORE_IPCBUF_H 2 + #define _ASM_SCORE_IPCBUF_H 3 + 4 + #include <asm-generic/ipcbuf.h> 5 + 6 + #endif /* _ASM_SCORE_IPCBUF_H */
+23
arch/score/include/asm/irq.h
··· 1 + #ifndef _ASM_SCORE_IRQ_H 2 + #define _ASM_SCORE_IRQ_H 3 + 4 + #define EXCEPTION_VECTOR_BASE_ADDR 0xa0000000 5 + #define VECTOR_ADDRESS_OFFSET_MODE4 0 6 + #define VECTOR_ADDRESS_OFFSET_MODE16 1 7 + 8 + #define DEBUG_VECTOR_SIZE (0x4) 9 + #define DEBUG_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x1fc) 10 + 11 + #define GENERAL_VECTOR_SIZE (0x10) 12 + #define GENERAL_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x200) 13 + 14 + #define NR_IRQS 64 15 + #define IRQ_VECTOR_SIZE (0x10) 16 + #define IRQ_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x210) 17 + #define IRQ_VECTOR_END_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x5f0) 18 + 19 + #define irq_canonicalize(irq) (irq) 20 + 21 + #define IRQ_TIMER (7) /* Timer IRQ number of SPCT6600 */ 22 + 23 + #endif /* _ASM_SCORE_IRQ_H */
+6
arch/score/include/asm/irq_regs.h
··· 1 + #ifndef _ASM_SCORE_IRQ_REGS_H 2 + #define _ASM_SCORE_IRQ_REGS_H 3 + 4 + #include <asm-generic/irq_regs.h> 5 + 6 + #endif /* _ASM_SCORE_IRQ_REGS_H */
+111
arch/score/include/asm/irqflags.h
··· 1 + #ifndef _ASM_SCORE_IRQFLAGS_H 2 + #define _ASM_SCORE_IRQFLAGS_H 3 + 4 + #ifndef __ASSEMBLY__ 5 + 6 + #define raw_local_irq_save(x) \ 7 + { \ 8 + __asm__ __volatile__( \ 9 + "mfcr r8, cr0;" \ 10 + "li r9, 0xfffffffe;" \ 11 + "nop;" \ 12 + "mv %0, r8;" \ 13 + "and r8, r8, r9;" \ 14 + "mtcr r8, cr0;" \ 15 + "nop;" \ 16 + "nop;" \ 17 + "nop;" \ 18 + "nop;" \ 19 + "nop;" \ 20 + "ldi r9, 0x1;" \ 21 + "and %0, %0, r9;" \ 22 + : "=r" (x) \ 23 + : \ 24 + : "r8", "r9" \ 25 + ); \ 26 + } 27 + 28 + #define raw_local_irq_restore(x) \ 29 + { \ 30 + __asm__ __volatile__( \ 31 + "mfcr r8, cr0;" \ 32 + "ldi r9, 0x1;" \ 33 + "and %0, %0, r9;" \ 34 + "or r8, r8, %0;" \ 35 + "mtcr r8, cr0;" \ 36 + "nop;" \ 37 + "nop;" \ 38 + "nop;" \ 39 + "nop;" \ 40 + "nop;" \ 41 + : \ 42 + : "r"(x) \ 43 + : "r8", "r9" \ 44 + ); \ 45 + } 46 + 47 + #define raw_local_irq_enable(void) \ 48 + { \ 49 + __asm__ __volatile__( \ 50 + "mfcr\tr8,cr0;" \ 51 + "nop;" \ 52 + "nop;" \ 53 + "ori\tr8,0x1;" \ 54 + "mtcr\tr8,cr0;" \ 55 + "nop;" \ 56 + "nop;" \ 57 + "nop;" \ 58 + "nop;" \ 59 + "nop;" \ 60 + : \ 61 + : \ 62 + : "r8"); \ 63 + } 64 + 65 + #define raw_local_irq_disable(void) \ 66 + { \ 67 + __asm__ __volatile__( \ 68 + "mfcr\tr8,cr0;" \ 69 + "nop;" \ 70 + "nop;" \ 71 + "srli\tr8,r8,1;" \ 72 + "slli\tr8,r8,1;" \ 73 + "mtcr\tr8,cr0;" \ 74 + "nop;" \ 75 + "nop;" \ 76 + "nop;" \ 77 + "nop;" \ 78 + "nop;" \ 79 + : \ 80 + : \ 81 + : "r8"); \ 82 + } 83 + 84 + #define raw_local_save_flags(x) \ 85 + { \ 86 + __asm__ __volatile__( \ 87 + "mfcr r8, cr0;" \ 88 + "nop;" \ 89 + "nop;" \ 90 + "mv %0, r8;" \ 91 + "nop;" \ 92 + "nop;" \ 93 + "nop;" \ 94 + "nop;" \ 95 + "nop;" \ 96 + "ldi r9, 0x1;" \ 97 + "and %0, %0, r9;" \ 98 + : "=r" (x) \ 99 + : \ 100 + : "r8", "r9" \ 101 + ); \ 102 + } 103 + 104 + static inline int raw_irqs_disabled_flags(unsigned long flags) 105 + { 106 + return !(flags & 1); 107 + } 108 + 109 + #endif 110 + 111 + #endif /* _ASM_SCORE_IRQFLAGS_H */
+6
arch/score/include/asm/kdebug.h
··· 1 + #ifndef _ASM_SCORE_KDEBUG_H 2 + #define _ASM_SCORE_KDEBUG_H 3 + 4 + #include <asm-generic/kdebug.h> 5 + 6 + #endif /* _ASM_SCORE_KDEBUG_H */
+6
arch/score/include/asm/kmap_types.h
··· 1 + #ifndef _ASM_SCORE_KMAP_TYPES_H 2 + #define _ASM_SCORE_KMAP_TYPES_H 3 + 4 + #include <asm-generic/kmap_types.h> 5 + 6 + #endif /* _ASM_SCORE_KMAP_TYPES_H */
+4
arch/score/include/asm/linkage.h
··· 1 + #ifndef _ASM_SCORE_LINKAGE_H 2 + #define _ASM_SCORE_LINKAGE_H 3 + 4 + #endif /* _ASM_SCORE_LINKAGE_H */
+6
arch/score/include/asm/local.h
··· 1 + #ifndef _ASM_SCORE_LOCAL_H 2 + #define _ASM_SCORE_LOCAL_H 3 + 4 + #include <asm-generic/local.h> 5 + 6 + #endif /* _ASM_SCORE_LOCAL_H */
+6
arch/score/include/asm/mman.h
··· 1 + #ifndef _ASM_SCORE_MMAN_H 2 + #define _ASM_SCORE_MMAN_H 3 + 4 + #include <asm-generic/mman.h> 5 + 6 + #endif /* _ASM_SCORE_MMAN_H */
+6
arch/score/include/asm/mmu.h
··· 1 + #ifndef _ASM_SCORE_MMU_H 2 + #define _ASM_SCORE_MMU_H 3 + 4 + typedef unsigned long mm_context_t; 5 + 6 + #endif /* _ASM_SCORE_MMU_H */
+113
arch/score/include/asm/mmu_context.h
··· 1 + #ifndef _ASM_SCORE_MMU_CONTEXT_H 2 + #define _ASM_SCORE_MMU_CONTEXT_H 3 + 4 + #include <linux/errno.h> 5 + #include <linux/sched.h> 6 + #include <linux/slab.h> 7 + #include <asm-generic/mm_hooks.h> 8 + 9 + #include <asm/cacheflush.h> 10 + #include <asm/tlbflush.h> 11 + #include <asm/scoreregs.h> 12 + 13 + /* 14 + * For the fast tlb miss handlers, we keep a per cpu array of pointers 15 + * to the current pgd for each processor. Also, the proc. id is stuffed 16 + * into the context register. 17 + */ 18 + extern unsigned long asid_cache; 19 + extern unsigned long pgd_current; 20 + 21 + #define TLBMISS_HANDLER_SETUP_PGD(pgd) (pgd_current = (unsigned long)(pgd)) 22 + 23 + #define TLBMISS_HANDLER_SETUP() \ 24 + do { \ 25 + write_c0_context(0); \ 26 + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) \ 27 + } while (0) 28 + 29 + /* 30 + * All unused by hardware upper bits will be considered 31 + * as a software asid extension. 32 + */ 33 + #define ASID_VERSION_MASK 0xfffff000 34 + #define ASID_FIRST_VERSION 0x1000 35 + 36 + /* PEVN --------- VPN ---------- --ASID--- -NA- */ 37 + /* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */ 38 + /* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */ 39 + #define ASID_INC 0x10 40 + #define ASID_MASK 0xff0 41 + 42 + static inline void enter_lazy_tlb(struct mm_struct *mm, 43 + struct task_struct *tsk) 44 + {} 45 + 46 + static inline void 47 + get_new_mmu_context(struct mm_struct *mm) 48 + { 49 + unsigned long asid = asid_cache + ASID_INC; 50 + 51 + if (!(asid & ASID_MASK)) { 52 + local_flush_tlb_all(); /* start new asid cycle */ 53 + if (!asid) /* fix version if needed */ 54 + asid = ASID_FIRST_VERSION; 55 + } 56 + 57 + mm->context = asid; 58 + asid_cache = asid; 59 + } 60 + 61 + /* 62 + * Initialize the context related info for a new mm_struct 63 + * instance. 
64 + */ 65 + static inline int 66 + init_new_context(struct task_struct *tsk, struct mm_struct *mm) 67 + { 68 + mm->context = 0; 69 + return 0; 70 + } 71 + 72 + static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 73 + struct task_struct *tsk) 74 + { 75 + unsigned long flags; 76 + 77 + local_irq_save(flags); 78 + if ((next->context ^ asid_cache) & ASID_VERSION_MASK) 79 + get_new_mmu_context(next); 80 + 81 + pevn_set(next->context); 82 + TLBMISS_HANDLER_SETUP_PGD(next->pgd); 83 + local_irq_restore(flags); 84 + } 85 + 86 + /* 87 + * Destroy context related info for an mm_struct that is about 88 + * to be put to rest. 89 + */ 90 + static inline void destroy_context(struct mm_struct *mm) 91 + {} 92 + 93 + static inline void 94 + deactivate_mm(struct task_struct *task, struct mm_struct *mm) 95 + {} 96 + 97 + /* 98 + * After we have set current->mm to a new value, this activates 99 + * the context for the new mm so we see the new mappings. 100 + */ 101 + static inline void 102 + activate_mm(struct mm_struct *prev, struct mm_struct *next) 103 + { 104 + unsigned long flags; 105 + 106 + local_irq_save(flags); 107 + get_new_mmu_context(next); 108 + pevn_set(next->context); 109 + TLBMISS_HANDLER_SETUP_PGD(next->pgd); 110 + local_irq_restore(flags); 111 + } 112 + 113 + #endif /* _ASM_SCORE_MMU_CONTEXT_H */
+39
arch/score/include/asm/module.h
··· 1 + #ifndef _ASM_SCORE_MODULE_H 2 + #define _ASM_SCORE_MODULE_H 3 + 4 + #include <linux/list.h> 5 + #include <asm/uaccess.h> 6 + 7 + struct mod_arch_specific { 8 + /* Data Bus Error exception tables */ 9 + struct list_head dbe_list; 10 + const struct exception_table_entry *dbe_start; 11 + const struct exception_table_entry *dbe_end; 12 + }; 13 + 14 + typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ 15 + 16 + #define Elf_Shdr Elf32_Shdr 17 + #define Elf_Sym Elf32_Sym 18 + #define Elf_Ehdr Elf32_Ehdr 19 + #define Elf_Addr Elf32_Addr 20 + 21 + /* Given an address, look for it in the exception tables. */ 22 + #ifdef CONFIG_MODULES 23 + const struct exception_table_entry *search_module_dbetables(unsigned long addr); 24 + #else 25 + static inline const struct exception_table_entry 26 + *search_module_dbetables(unsigned long addr) 27 + { 28 + return NULL; 29 + } 30 + #endif 31 + 32 + #define MODULE_PROC_FAMILY "SCORE7" 33 + #define MODULE_KERNEL_TYPE "32BIT " 34 + #define MODULE_KERNEL_SMTC "" 35 + 36 + #define MODULE_ARCH_VERMAGIC \ 37 + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC 38 + 39 + #endif /* _ASM_SCORE_MODULE_H */
+6
arch/score/include/asm/msgbuf.h
··· 1 + #ifndef _ASM_SCORE_MSGBUF_H 2 + #define _ASM_SCORE_MSGBUF_H 3 + 4 + #include <asm-generic/msgbuf.h> 5 + 6 + #endif /* _ASM_SCORE_MSGBUF_H */
+6
arch/score/include/asm/mutex.h
··· 1 + #ifndef _ASM_SCORE_MUTEX_H 2 + #define _ASM_SCORE_MUTEX_H 3 + 4 + #include <asm-generic/mutex-dec.h> 5 + 6 + #endif /* _ASM_SCORE_MUTEX_H */
+92
arch/score/include/asm/page.h
··· 1 + #ifndef _ASM_SCORE_PAGE_H 2 + #define _ASM_SCORE_PAGE_H 3 + 4 + #include <linux/pfn.h> 5 + 6 + /* PAGE_SHIFT determines the page size */ 7 + #define PAGE_SHIFT (12) 8 + #define PAGE_SIZE (1UL << PAGE_SHIFT) 9 + #define PAGE_MASK (~(PAGE_SIZE-1)) 10 + 11 + #ifdef __KERNEL__ 12 + 13 + #ifndef __ASSEMBLY__ 14 + 15 + #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) 16 + #define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) 17 + 18 + /* align addr on a size boundary - adjust address up/down if needed */ 19 + #define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1))) 20 + #define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) 21 + 22 + /* align addr on a size boundary - adjust address up if needed */ 23 + #define _ALIGN(addr, size) _ALIGN_UP(addr, size) 24 + 25 + /* 26 + * PAGE_OFFSET -- the first address of the first page of memory. When not 27 + * using MMU this corresponds to the first free page in physical memory (aligned 28 + * on a page boundary). 29 + */ 30 + #define PAGE_OFFSET (0xA0000000UL) 31 + 32 + #define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) 33 + #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 34 + 35 + #define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) 36 + #define copy_user_page(vto, vfrom, vaddr, topg) \ 37 + memcpy((vto), (vfrom), PAGE_SIZE) 38 + 39 + /* 40 + * These are used to make use of C type-checking.. 
41 + */ 42 + 43 + typedef struct { unsigned long pte; } pte_t; /* page table entry */ 44 + typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */ 45 + typedef struct { unsigned long pgprot; } pgprot_t; 46 + typedef struct page *pgtable_t; 47 + 48 + #define pte_val(x) ((x).pte) 49 + #define pgd_val(x) ((x).pgd) 50 + #define pgprot_val(x) ((x).pgprot) 51 + 52 + #define __pte(x) ((pte_t) { (x) }) 53 + #define __pgd(x) ((pgd_t) { (x) }) 54 + #define __pgprot(x) ((pgprot_t) { (x) }) 55 + 56 + extern unsigned long max_low_pfn; 57 + extern unsigned long min_low_pfn; 58 + extern unsigned long max_pfn; 59 + 60 + #define __pa(vaddr) ((unsigned long) (vaddr)) 61 + #define __va(paddr) ((void *) (paddr)) 62 + 63 + #define phys_to_pfn(phys) (PFN_DOWN(phys)) 64 + #define pfn_to_phys(pfn) (PFN_PHYS(pfn)) 65 + 66 + #define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) 67 + #define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 68 + 69 + #define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 70 + #define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 71 + 72 + #define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) 73 + #define page_to_bus(page) (page_to_phys(page)) 74 + #define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) 75 + 76 + #define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr) 77 + 78 + #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 79 + 80 + #endif /* __ASSEMBLY__ */ 81 + 82 + #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) 83 + 84 + #endif /* __KERNEL__ */ 85 + 86 + #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 87 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 88 + 89 + #include <asm-generic/memory_model.h> 90 + #include <asm-generic/getorder.h> 91 + 92 + #endif /* _ASM_SCORE_PAGE_H */
+6
arch/score/include/asm/param.h
··· 1 + #ifndef _ASM_SCORE_PARAM_H 2 + #define _ASM_SCORE_PARAM_H 3 + 4 + #include <asm-generic/param.h> 5 + 6 + #endif /* _ASM_SCORE_PARAM_H */
+4
arch/score/include/asm/pci.h
··· 1 + #ifndef _ASM_SCORE_PCI_H 2 + #define _ASM_SCORE_PCI_H 3 + 4 + #endif /* _ASM_SCORE_PCI_H */
+6
arch/score/include/asm/percpu.h
··· 1 + #ifndef _ASM_SCORE_PERCPU_H 2 + #define _ASM_SCORE_PERCPU_H 3 + 4 + #include <asm-generic/percpu.h> 5 + 6 + #endif /* _ASM_SCORE_PERCPU_H */
+83
arch/score/include/asm/pgalloc.h
#ifndef _ASM_SCORE_PGALLOC_H
#define _ASM_SCORE_PGALLOC_H

#include <linux/mm.h>

/* Point a pmd entry at a kernel pte table (pte is a kernel virtual address). */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
	pte_t *pte)
{
	set_pmd(pmd, __pmd((unsigned long)pte));
}

/* Point a pmd entry at a user pte table backed by a struct page. */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	pgtable_t pte)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}

#define pmd_pgtable(pmd)	pmd_page(pmd)

/*
 * Allocate a fresh pgd. User entries are initialised by pgd_init()
 * (pointing at invalid_pte_table); kernel entries are copied from
 * init_mm so kernel mappings are shared by every process.
 * Returns NULL on allocation failure.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

/* Allocate a zeroed pte page for kernel mappings; NULL on failure. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
					PTE_ORDER);

	return pte;
}

/* Allocate a pte page for user mappings: cleared and ctor-initialised. */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
	if (pte) {
		clear_highpage(pte);
		pgtable_page_ctor(pte);
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

/* Free a user pte page from the mmu_gather path (runs the dtor first). */
#define __pte_free_tlb(tlb, pte)		\
do {						\
	pgtable_page_dtor(pte);			\
	tlb_remove_page((tlb), pte);		\
} while (0)

#define check_pgt_cache()	do {} while (0)

#endif /* _ASM_SCORE_PGALLOC_H */
+25
arch/score/include/asm/pgtable-bits.h
#ifndef _ASM_SCORE_PGTABLE_BITS_H
#define _ASM_SCORE_PGTABLE_BITS_H

/* Software-managed bits (not interpreted by the TLB hardware). */
#define _PAGE_ACCESSED		(1<<5)	/* implemented in software */
#define _PAGE_READ		(1<<6)	/* implemented in software */
#define _PAGE_WRITE		(1<<7)	/* implemented in software */
#define _PAGE_PRESENT		(1<<9)	/* implemented in software */
#define _PAGE_MODIFIED		(1<<10)	/* implemented in software */
/*
 * _PAGE_FILE deliberately shares bit 10 with _PAGE_MODIFIED: it is
 * only meaningful when _PAGE_PRESENT is clear (non-linear file pte).
 */
#define _PAGE_FILE		(1<<10)

/* Hardware bits. _PAGE_SILENT_READ/WRITE are synonyms for VALID/DIRTY. */
#define _PAGE_GLOBAL		(1<<0)
#define _PAGE_VALID		(1<<1)
#define _PAGE_SILENT_READ	(1<<1)	/* synonym */
#define _PAGE_DIRTY		(1<<2)	/* Write bit */
#define _PAGE_SILENT_WRITE	(1<<2)
#define _PAGE_CACHE		(1<<3)	/* cache */
#define _CACHE_MASK		(1<<3)
#define _PAGE_BUFFERABLE	(1<<4)	/* follows spec. */

#define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
/* Bits preserved across pte_modify(): frame number plus sw/cache state. */
#define _PAGE_CHG_MASK	\
	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE)

#endif /* _ASM_SCORE_PGTABLE_BITS_H */
+276
arch/score/include/asm/pgtable.h
··· 1 + #ifndef _ASM_SCORE_PGTABLE_H 2 + #define _ASM_SCORE_PGTABLE_H 3 + 4 + #include <linux/const.h> 5 + #include <asm-generic/pgtable-nopmd.h> 6 + 7 + #include <asm/fixmap.h> 8 + #include <asm/setup.h> 9 + #include <asm/pgtable-bits.h> 10 + 11 + extern void load_pgd(unsigned long pg_dir); 12 + extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; 13 + 14 + /* PGDIR_SHIFT determines what a third-level page table entry can map */ 15 + #define PGDIR_SHIFT 22 16 + #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) 17 + #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 18 + 19 + /* 20 + * Entries per page directory level: we use two-level, so 21 + * we don't really have any PUD/PMD directory physically. 22 + */ 23 + #define PGD_ORDER 0 24 + #define PTE_ORDER 0 25 + 26 + #define PTRS_PER_PGD 1024 27 + #define PTRS_PER_PTE 1024 28 + 29 + #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) 30 + #define FIRST_USER_ADDRESS 0 31 + 32 + #define VMALLOC_START (0xc0000000UL) 33 + 34 + #define PKMAP_BASE (0xfd000000UL) 35 + 36 + #define VMALLOC_END (FIXADDR_START - 2*PAGE_SIZE) 37 + 38 + #define pte_ERROR(e) \ 39 + printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ 40 + __FILE__, __LINE__, pte_val(e)) 41 + #define pgd_ERROR(e) \ 42 + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ 43 + __FILE__, __LINE__, pgd_val(e)) 44 + 45 + /* 46 + * Empty pgd/pmd entries point to the invalid_pte_table. 
47 + */ 48 + static inline int pmd_none(pmd_t pmd) 49 + { 50 + return pmd_val(pmd) == (unsigned long) invalid_pte_table; 51 + } 52 + 53 + #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) 54 + 55 + static inline int pmd_present(pmd_t pmd) 56 + { 57 + return pmd_val(pmd) != (unsigned long) invalid_pte_table; 58 + } 59 + 60 + static inline void pmd_clear(pmd_t *pmdp) 61 + { 62 + pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); 63 + } 64 + 65 + #define pte_page(x) pfn_to_page(pte_pfn(x)) 66 + #define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) 67 + #define pfn_pte(pfn, prot) \ 68 + __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 69 + 70 + #define __pgd_offset(address) pgd_index(address) 71 + #define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 72 + #define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) 73 + 74 + /* to find an entry in a kernel page-table-directory */ 75 + #define pgd_offset_k(address) pgd_offset(&init_mm, address) 76 + #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 77 + 78 + /* to find an entry in a page-table-directory */ 79 + #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) 80 + 81 + /* Find an entry in the third-level page table.. 
*/ 82 + #define __pte_offset(address) \ 83 + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 84 + #define pte_offset(dir, address) \ 85 + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) 86 + #define pte_offset_kernel(dir, address) \ 87 + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) 88 + 89 + #define pte_offset_map(dir, address) \ 90 + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) 91 + #define pte_offset_map_nested(dir, address) \ 92 + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) 93 + #define pte_unmap(pte) ((void)(pte)) 94 + #define pte_unmap_nested(pte) ((void)(pte)) 95 + 96 + /* 97 + * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken, 98 + * split up 30 bits of offset into this range: 99 + */ 100 + #define PTE_FILE_MAX_BITS 30 101 + #define pte_to_pgoff(_pte) \ 102 + (((_pte).pte & 0x1ff) | (((_pte).pte >> 11) << 9)) 103 + #define pgoff_to_pte(off) \ 104 + ((pte_t) {((off) & 0x1ff) | (((off) >> 9) << 11) | _PAGE_FILE}) 105 + #define __pte_to_swp_entry(pte) \ 106 + ((swp_entry_t) { pte_val(pte)}) 107 + #define __swp_entry_to_pte(x) ((pte_t) {(x).val}) 108 + 109 + #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) 110 + #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 111 + static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 112 + 113 + #define set_pte(pteptr, pteval) (*(pteptr) = pteval) 114 + #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) 115 + #define pte_clear(mm, addr, xp) \ 116 + do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) 117 + 118 + #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 119 + remap_pfn_range(vma, vaddr, pfn, size, prot) 120 + 121 + /* 122 + * The "pgd_xxx()" functions here are trivial for a folded two-level 123 + * setup: the pgd is never bad, and a pmd always exists (as it's folded 124 + * into the pgd entry) 125 + */ 126 + #define pgd_present(pgd) (1) 127 + #define pgd_none(pgd) (0) 128 + #define pgd_bad(pgd) (0) 129 + 
#define pgd_clear(pgdp) do { } while (0) 130 + 131 + #define kern_addr_valid(addr) (1) 132 + #define pmd_offset(a, b) ((void *) 0) 133 + #define pmd_page_vaddr(pmd) pmd_val(pmd) 134 + 135 + #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) 136 + #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 137 + 138 + #define pud_offset(pgd, address) ((pud_t *) pgd) 139 + 140 + #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CACHE) 141 + #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ 142 + _PAGE_CACHE) 143 + #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) 144 + #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) 145 + #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ 146 + _PAGE_GLOBAL | _PAGE_CACHE) 147 + #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ 148 + __WRITEABLE | _PAGE_GLOBAL & ~_PAGE_CACHE) 149 + 150 + #define __P000 PAGE_NONE 151 + #define __P001 PAGE_READONLY 152 + #define __P010 PAGE_COPY 153 + #define __P011 PAGE_COPY 154 + #define __P100 PAGE_READONLY 155 + #define __P101 PAGE_READONLY 156 + #define __P110 PAGE_COPY 157 + #define __P111 PAGE_COPY 158 + 159 + #define __S000 PAGE_NONE 160 + #define __S001 PAGE_READONLY 161 + #define __S010 PAGE_SHARED 162 + #define __S011 PAGE_SHARED 163 + #define __S100 PAGE_READONLY 164 + #define __S101 PAGE_READONLY 165 + #define __S110 PAGE_SHARED 166 + #define __S111 PAGE_SHARED 167 + 168 + #define pgprot_noncached(x) (x) 169 + 170 + #define __swp_type(x) (0) 171 + #define __swp_offset(x) (0) 172 + #define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) 173 + 174 + #define ZERO_PAGE(vaddr) ({ BUG(); NULL; }) 175 + 176 + #define swapper_pg_dir ((pgd_t *) NULL) 177 + 178 + #define pgtable_cache_init() do {} while (0) 179 + 180 + #define arch_enter_lazy_cpu_mode() do {} while (0) 181 + 182 + static inline int pte_write(pte_t pte) 183 + { 184 + return pte_val(pte) & _PAGE_WRITE; 185 + } 
186 + 187 + static inline int pte_dirty(pte_t pte) 188 + { 189 + return pte_val(pte) & _PAGE_MODIFIED; 190 + } 191 + 192 + static inline int pte_young(pte_t pte) 193 + { 194 + return pte_val(pte) & _PAGE_ACCESSED; 195 + } 196 + 197 + static inline int pte_file(pte_t pte) 198 + { 199 + return pte_val(pte) & _PAGE_FILE; 200 + } 201 + 202 + #define pte_special(pte) (0) 203 + 204 + static inline pte_t pte_wrprotect(pte_t pte) 205 + { 206 + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); 207 + return pte; 208 + } 209 + 210 + static inline pte_t pte_mkclean(pte_t pte) 211 + { 212 + pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); 213 + return pte; 214 + } 215 + 216 + static inline pte_t pte_mkold(pte_t pte) 217 + { 218 + pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); 219 + return pte; 220 + } 221 + 222 + static inline pte_t pte_mkwrite(pte_t pte) 223 + { 224 + pte_val(pte) |= _PAGE_WRITE; 225 + if (pte_val(pte) & _PAGE_MODIFIED) 226 + pte_val(pte) |= _PAGE_SILENT_WRITE; 227 + return pte; 228 + } 229 + 230 + static inline pte_t pte_mkdirty(pte_t pte) 231 + { 232 + pte_val(pte) |= _PAGE_MODIFIED; 233 + if (pte_val(pte) & _PAGE_WRITE) 234 + pte_val(pte) |= _PAGE_SILENT_WRITE; 235 + return pte; 236 + } 237 + 238 + static inline pte_t pte_mkyoung(pte_t pte) 239 + { 240 + pte_val(pte) |= _PAGE_ACCESSED; 241 + if (pte_val(pte) & _PAGE_READ) 242 + pte_val(pte) |= _PAGE_SILENT_READ; 243 + return pte; 244 + } 245 + 246 + #define set_pmd(pmdptr, pmdval) \ 247 + do { *(pmdptr) = (pmdval); } while (0) 248 + #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 249 + 250 + extern unsigned long pgd_current; 251 + extern void paging_init(void); 252 + 253 + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 254 + { 255 + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); 256 + } 257 + 258 + extern void __update_tlb(struct vm_area_struct *vma, 259 + unsigned long address, pte_t pte); 260 + extern void __update_cache(struct vm_area_struct *vma, 
261 + unsigned long address, pte_t pte); 262 + 263 + static inline void update_mmu_cache(struct vm_area_struct *vma, 264 + unsigned long address, pte_t pte) 265 + { 266 + __update_tlb(vma, address, pte); 267 + __update_cache(vma, address, pte); 268 + } 269 + 270 + #ifndef __ASSEMBLY__ 271 + #include <asm-generic/pgtable.h> 272 + 273 + void setup_memory(void); 274 + #endif /* __ASSEMBLY__ */ 275 + 276 + #endif /* _ASM_SCORE_PGTABLE_H */
+6
arch/score/include/asm/poll.h
#ifndef _ASM_SCORE_POLL_H
#define _ASM_SCORE_POLL_H

/* score has no architecture-specific poll flags; use the generics. */
#include <asm-generic/poll.h>

#endif /* _ASM_SCORE_POLL_H */
+6
arch/score/include/asm/posix_types.h
#ifndef _ASM_SCORE_POSIX_TYPES_H
#define _ASM_SCORE_POSIX_TYPES_H

/* The asm-generic POSIX types fit score (32-bit) unchanged. */
#include <asm-generic/posix_types.h>

#endif /* _ASM_SCORE_POSIX_TYPES_H */
+106
arch/score/include/asm/processor.h
#ifndef _ASM_SCORE_PROCESSOR_H
#define _ASM_SCORE_PROCESSOR_H

#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/segment.h>

struct task_struct;

/*
 * System setup and hardware flags..
 */
/* Optional idle hook; when set, the idle loop calls it to wait for work. */
extern void (*cpu_wait)(void);

extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs *regs,
	unsigned long pc, unsigned long sp);
extern unsigned long get_wchan(struct task_struct *p);

/*
 * Return current instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })

#define cpu_relax()		barrier()
#define release_thread(thread)	do {} while (0)
#define prepare_to_copy(tsk)	do {} while (0)

/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	0x7fff8000UL

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 *
 * NOTE(review): "& ~(PAGE_SIZE)" clears only bit 12 rather than
 * page-aligning the value ("& ~(PAGE_SIZE - 1)") — the result happens
 * to be page-aligned for the current TASK_SIZE, but confirm the mask
 * is intentional.
 */
#define TASK_UNMAPPED_BASE	((TASK_SIZE / 3) & ~(PAGE_SIZE))

#ifdef __KERNEL__
#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE
#endif

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Callee-saved registers preserved across context switch. */
	unsigned long reg0, reg2, reg3;
	unsigned long reg12, reg13, reg14, reg15, reg16;
	unsigned long reg17, reg18, reg19, reg20, reg21;

	unsigned long cp0_psr;
	unsigned long cp0_ema;		/* Last user fault */
	unsigned long cp0_badvaddr;	/* Last user fault */
	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
	unsigned long error_code;
	unsigned long trap_no;

	unsigned long mflags;
	unsigned long reg29;

	/* Single-step state used by ptrace. */
	unsigned long single_step;
	unsigned long ss_nextcnt;

	/* Saved original instructions at the two possible breakpoints. */
	unsigned long insn1_type;
	unsigned long addr1;
	unsigned long insn1;

	unsigned long insn2_type;
	unsigned long addr2;
	unsigned long insn2;

	mm_segment_t current_ds;
};

#define INIT_THREAD { \
	.reg0 = 0, \
	.reg2 = 0, \
	.reg3 = 0, \
	.reg12 = 0, \
	.reg13 = 0, \
	.reg14 = 0, \
	.reg15 = 0, \
	.reg16 = 0, \
	.reg17 = 0, \
	.reg18 = 0, \
	.reg19 = 0, \
	.reg20 = 0, \
	.reg21 = 0, \
	.cp0_psr = 0, \
	.error_code = 0, \
	.trap_no = 0, \
}

/* Top of kernel stack, leaving 32 bytes of scratch below the pt_regs. */
#define kstk_tos(tsk) \
	((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
#define task_pt_regs(tsk) ((struct pt_regs *)kstk_tos(tsk) - 1)

#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])

#endif /* _ASM_SCORE_PROCESSOR_H */
+95
arch/score/include/asm/ptrace.h
#ifndef _ASM_SCORE_PTRACE_H
#define _ASM_SCORE_PTRACE_H

#define PTRACE_GETREGS		12
#define PTRACE_SETREGS		13

/* Pseudo register numbers (beyond r0-r31) exposed through ptrace. */
#define PC		32
#define CONDITION	33
#define ECR		34
#define EMA		35
#define CEH		36
#define CEL		37
#define COUNTER		38
#define LDCR		39
#define STCR		40
#define PSR		41

/* Encodings used when planting single-step/breakpoint instructions. */
#define SINGLESTEP16_INSN	0x7006
#define SINGLESTEP32_INSN	0x840C8000
#define BREAKPOINT16_INSN	0x7002		/* work on SPG300 */
#define BREAKPOINT32_INSN	0x84048000	/* work on SPG300 */

/* Define instruction mask */
#define INSN32_MASK	0x80008000

/* Branch/jump opcode patterns and masks, used to find the next PC
   when single-stepping over control-flow instructions. */
#define J32	0x88008000	/* 1_00010_0000000000_1_000000000000000 */
#define J32M	0xFC008000	/* 1_11111_0000000000_1_000000000000000 */

#define B32	0x90008000	/* 1_00100_0000000000_1_000000000000000 */
#define B32M	0xFC008000
#define BL32	0x90008001	/* 1_00100_0000000000_1_000000000000001 */
#define BL32M	B32
#define BR32	0x80008008	/* 1_00000_0000000000_1_00000000_000100_0 */
#define BR32M	0xFFE0807E
#define BRL32	0x80008009	/* 1_00000_0000000000_1_00000000_000100_1 */
#define BRL32M	BR32M

#define B32_SET	(J32 | B32 | BL32 | BR32 | BRL32)

#define J16	0x3000		/* 0_011_....... */
#define J16M	0xF000
#define B16	0x4000		/* 0_100_....... */
#define B16M	0xF000
#define BR16	0x0004		/* 0_000.......0100 */
#define BR16M	0xF00F
#define B16_SET	(J16 | B16 | BR16)


/*
 * This struct defines the way the registers are stored on the stack during a
 * system call/exception. As usual the registers k0/k1 aren't being saved.
 */
struct pt_regs {
	unsigned long pad0[6];	/* stack arguments */
	unsigned long orig_r4;	/* original syscall arg, for restart */
	unsigned long orig_r7;	/* original syscall number */
	unsigned long regs[32];

	unsigned long cel;
	unsigned long ceh;

	unsigned long sr0;	/* cnt */
	unsigned long sr1;	/* lcr */
	unsigned long sr2;	/* scr */

	unsigned long cp0_epc;
	unsigned long cp0_ema;
	unsigned long cp0_psr;
	unsigned long cp0_ecr;
	unsigned long cp0_condition;

	/* non-zero when this frame was entered via a syscall */
	long is_syscall;
};

#ifdef __KERNEL__

/*
 * Does the process account for user or for system time?
 * (PSR bit 3 holds the saved kernel/user mode.)
 */
#define user_mode(regs)		((regs->cp0_psr & 8) == 8)

#define instruction_pointer(regs)	((unsigned long)(regs)->cp0_epc)
#define profile_pc(regs)		instruction_pointer(regs)

extern void do_syscall_trace(struct pt_regs *regs, int entryexit);
extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *);
extern int read_tsk_short(struct task_struct *, unsigned long,
			unsigned short *);

#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif /* __KERNEL__ */

#endif /* _ASM_SCORE_PTRACE_H */
+6
arch/score/include/asm/resource.h
#ifndef _ASM_SCORE_RESOURCE_H
#define _ASM_SCORE_RESOURCE_H

/* No arch-specific rlimit definitions; use the generics. */
#include <asm-generic/resource.h>

#endif /* _ASM_SCORE_RESOURCE_H */
+6
arch/score/include/asm/scatterlist.h
#ifndef _ASM_SCORE_SCATTERLIST_H
#define _ASM_SCORE_SCATTERLIST_H

/* No arch-specific scatterlist layout; use the generics. */
#include <asm-generic/scatterlist.h>

#endif /* _ASM_SCORE_SCATTERLIST_H */
+51
arch/score/include/asm/scoreregs.h
#ifndef _ASM_SCORE_SCOREREGS_H
#define _ASM_SCORE_SCOREREGS_H

#include <linux/linkage.h>

/* TIMER0 register block (physical base and per-register addresses). */
#define TIME0BASE		0x96080000
#define P_TIMER0_CTRL		(TIME0BASE + 0x00)
#define P_TIMER0_CPP_CTRL	(TIME0BASE + 0x04)
#define P_TIMER0_PRELOAD	(TIME0BASE + 0x08)
#define P_TIMER0_CPP_REG	(TIME0BASE + 0x0C)
#define P_TIMER0_UPCNT		(TIME0BASE + 0x10)

/* Timer Controller Register */
/* bit 0 Timer enable */
#define TMR_DISABLE	0x0000
#define TMR_ENABLE	0x0001

/* bit 1 Interrupt enable */
#define TMR_IE_DISABLE	0x0000
#define TMR_IE_ENABLE	0x0002

/* bit 2 Output enable */
#define TMR_OE_DISABLE	0x0004
#define TMR_OE_ENABLE	0x0000

/* bit 4 Up/Down counting selection */
#define TMR_UD_DOWN	0x0000
#define TMR_UD_UP	0x0010

/* bit 5 Up/Down counting control selection */
#define TMR_UDS_UD	0x0000
#define TMR_UDS_EXTUD	0x0020

/* bit 6 Time output mode (note: "PILSE" spelling is kept for ABI,
   presumably meant "PULSE") */
#define TMR_OM_TOGGLE	0x0000
#define TMR_OM_PILSE	0x0040

/* bit 8..9 External input active edge selection */
#define TMR_ES_PE	0x0000
#define TMR_ES_NE	0x0100
#define TMR_ES_BOTH	0x0200

/* bit 10..11 Operating mode */
#define TMR_M_FREE	0x0000	/* free running timer mode */
#define TMR_M_PERIODIC	0x0400	/* periodic timer mode */
#define TMR_M_FC	0x0800	/* free running counter mode */
#define TMR_M_PC	0x0c00	/* periodic counter mode */

/* Timer input clock: 27 MHz oscillator divided by 4 = 6.75 MHz. */
#define SYSTEM_CLOCK	(27*1000000/4)
#endif /* _ASM_SCORE_SCOREREGS_H */
+6
arch/score/include/asm/sections.h
#ifndef _ASM_SCORE_SECTIONS_H
#define _ASM_SCORE_SECTIONS_H

/* No arch-specific linker-section symbols; use the generics. */
#include <asm-generic/sections.h>

#endif /* _ASM_SCORE_SECTIONS_H */
+21
arch/score/include/asm/segment.h
#ifndef _ASM_SCORE_SEGMENT_H
#define _ASM_SCORE_SEGMENT_H

#ifndef __ASSEMBLY__

/* Address-space limit cookie used by get_fs()/set_fs(). */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/*
 * Kernel and user address spaces are not distinguished by the
 * segment value on score; both map to the same limit.
 */
#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		KERNEL_DS

# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(x)	\
	do { current_thread_info()->addr_limit = (x); } while (0)

# define segment_eq(a, b)	((a).seg == (b).seg)

# endif /* __ASSEMBLY__ */
#endif /* _ASM_SCORE_SEGMENT_H */
+6
arch/score/include/asm/sembuf.h
#ifndef _ASM_SCORE_SEMBUF_H
#define _ASM_SCORE_SEMBUF_H

/* No arch-specific semid64_ds layout; use the generics. */
#include <asm-generic/sembuf.h>

#endif /* _ASM_SCORE_SEMBUF_H */
+40
arch/score/include/asm/setup.h
#ifndef _ASM_SCORE_SETUP_H
#define _ASM_SCORE_SETUP_H

#define COMMAND_LINE_SIZE	256
#define MEM_SIZE		0x2000000	/* default 32MB of RAM */

#ifdef __KERNEL__

extern void pagetable_init(void);
extern void pgd_init(unsigned long page);

extern void setup_early_printk(void);
extern void cpu_cache_init(void);
extern void tlb_init(void);

/*
 * Exception/interrupt entry points implemented in assembly; the
 * spellings (e.g. handle_tlb_invaild) must match the asm labels.
 */
extern void handle_nmi(void);
extern void handle_adelinsn(void);
extern void handle_adedata(void);
extern void handle_ibe(void);
extern void handle_pel(void);
extern void handle_sys(void);
extern void handle_ccu(void);
extern void handle_ri(void);
extern void handle_tr(void);
extern void handle_ades(void);
extern void handle_cee(void);
extern void handle_cpe(void);
extern void handle_dve(void);
extern void handle_dbe(void);
extern void handle_reserved(void);
extern void handle_tlb_refill(void);
extern void handle_tlb_invaild(void);
extern void handle_mod(void);
extern void debug_exception_vector(void);
extern void general_exception_vector(void);
extern void interrupt_exception_vector(void);

#endif /* __KERNEL__ */

#endif /* _ASM_SCORE_SETUP_H */
+6
arch/score/include/asm/shmbuf.h
#ifndef _ASM_SCORE_SHMBUF_H
#define _ASM_SCORE_SHMBUF_H

/* No arch-specific shmid64_ds layout; use the generics. */
#include <asm-generic/shmbuf.h>

#endif /* _ASM_SCORE_SHMBUF_H */
+6
arch/score/include/asm/shmparam.h
#ifndef _ASM_SCORE_SHMPARAM_H
#define _ASM_SCORE_SHMPARAM_H

/* No arch-specific SHMLBA requirement; use the generics. */
#include <asm-generic/shmparam.h>

#endif /* _ASM_SCORE_SHMPARAM_H */
+22
arch/score/include/asm/sigcontext.h
#ifndef _ASM_SCORE_SIGCONTEXT_H
#define _ASM_SCORE_SIGCONTEXT_H

/*
 * CPU state saved in the user-visible signal frame.
 * Keep this struct definition in sync with the sigcontext fragment
 * in arch/score/tools/offset.c
 */
struct sigcontext {
	unsigned int		sc_regmask;
	unsigned int		sc_psr;
	unsigned int		sc_condition;
	unsigned long		sc_pc;
	unsigned long		sc_regs[32];
	unsigned int		sc_ssflags;
	unsigned int		sc_mdceh;
	unsigned int		sc_mdcel;
	unsigned int		sc_ecr;
	unsigned long		sc_ema;
	unsigned long		sc_sigset[4];
};

#endif /* _ASM_SCORE_SIGCONTEXT_H */
+6
arch/score/include/asm/siginfo.h
#ifndef _ASM_SCORE_SIGINFO_H
#define _ASM_SCORE_SIGINFO_H

/* No arch-specific siginfo layout; use the generics. */
#include <asm-generic/siginfo.h>

#endif /* _ASM_SCORE_SIGINFO_H */
+6
arch/score/include/asm/signal.h
#ifndef _ASM_SCORE_SIGNAL_H
#define _ASM_SCORE_SIGNAL_H

/* No arch-specific signal numbers or sigaction; use the generics. */
#include <asm-generic/signal.h>

#endif /* _ASM_SCORE_SIGNAL_H */
+6
arch/score/include/asm/socket.h
#ifndef _ASM_SCORE_SOCKET_H
#define _ASM_SCORE_SOCKET_H

/* No arch-specific socket options; use the generics. */
#include <asm-generic/socket.h>

#endif /* _ASM_SCORE_SOCKET_H */
+6
arch/score/include/asm/sockios.h
#ifndef _ASM_SCORE_SOCKIOS_H
#define _ASM_SCORE_SOCKIOS_H

/* No arch-specific socket ioctls; use the generics. */
#include <asm-generic/sockios.h>

#endif /* _ASM_SCORE_SOCKIOS_H */
+6
arch/score/include/asm/stat.h
#ifndef _ASM_SCORE_STAT_H
#define _ASM_SCORE_STAT_H

/* No arch-specific stat structure; use the generics. */
#include <asm-generic/stat.h>

#endif /* _ASM_SCORE_STAT_H */
+6
arch/score/include/asm/statfs.h
#ifndef _ASM_SCORE_STATFS_H
#define _ASM_SCORE_STATFS_H

/* No arch-specific statfs structure; use the generics. */
#include <asm-generic/statfs.h>

#endif /* _ASM_SCORE_STATFS_H */
+8
arch/score/include/asm/string.h
#ifndef _ASM_SCORE_STRING_H
#define _ASM_SCORE_STRING_H

/*
 * Optimised assembly string routines (lib/).
 * NOTE(review): size_t is used without an include here; this header
 * presumably relies on being pulled in via <linux/string.h>, which
 * includes <linux/types.h> first — confirm no direct includers exist.
 */
extern void *memset(void *__s, int __c, size_t __count);
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);

#endif /* _ASM_SCORE_STRING_H */
+6
arch/score/include/asm/swab.h
#ifndef _ASM_SCORE_SWAB_H
#define _ASM_SCORE_SWAB_H

/* No arch-optimised byte-swap routines; use the generics. */
#include <asm-generic/swab.h>

#endif /* _ASM_SCORE_SWAB_H */
+11
arch/score/include/asm/syscalls.h
··· 1 + #ifndef _ASM_SCORE_SYSCALLS_H 2 + #define _ASM_SCORE_SYSCALLS_H 3 + 4 + asmlinkage long score_clone(struct pt_regs *regs); 5 + asmlinkage long score_execve(struct pt_regs *regs); 6 + asmlinkage long score_sigaltstack(struct pt_regs *regs); 7 + asmlinkage long score_rt_sigreturn(struct pt_regs *regs); 8 + 9 + #include <asm-generic/syscalls.h> 10 + 11 + #endif /* _ASM_SCORE_SYSCALLS_H */
+90
arch/score/include/asm/system.h
··· 1 + #ifndef _ASM_SCORE_SYSTEM_H 2 + #define _ASM_SCORE_SYSTEM_H 3 + 4 + #include <linux/types.h> 5 + #include <linux/irqflags.h> 6 + 7 + struct pt_regs; 8 + struct task_struct; 9 + 10 + extern void *resume(void *last, void *next, void *next_ti); 11 + 12 + #define switch_to(prev, next, last) \ 13 + do { \ 14 + (last) = resume(prev, next, task_thread_info(next)); \ 15 + } while (0) 16 + 17 + #define finish_arch_switch(prev) do {} while (0) 18 + 19 + typedef void (*vi_handler_t)(void); 20 + extern unsigned long arch_align_stack(unsigned long sp); 21 + 22 + #define mb() barrier() 23 + #define rmb() barrier() 24 + #define wmb() barrier() 25 + #define smp_mb() barrier() 26 + #define smp_rmb() barrier() 27 + #define smp_wmb() barrier() 28 + 29 + #define read_barrier_depends() do {} while (0) 30 + #define smp_read_barrier_depends() do {} while (0) 31 + 32 + #define set_mb(var, value) do {var = value; wmb(); } while (0) 33 + 34 + #define __HAVE_ARCH_CMPXCHG 1 35 + 36 + #include <asm-generic/cmpxchg-local.h> 37 + 38 + #ifndef __ASSEMBLY__ 39 + 40 + struct __xchg_dummy { unsigned long a[100]; }; 41 + #define __xg(x) ((struct __xchg_dummy *)(x)) 42 + 43 + static inline 44 + unsigned long __xchg(volatile unsigned long *m, unsigned long val) 45 + { 46 + unsigned long retval; 47 + unsigned long flags; 48 + 49 + local_irq_save(flags); 50 + retval = *m; 51 + *m = val; 52 + local_irq_restore(flags); 53 + return retval; 54 + } 55 + 56 + #define xchg(ptr, v) \ 57 + ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ 58 + (unsigned long)(v))) 59 + 60 + static inline unsigned long __cmpxchg(volatile unsigned long *m, 61 + unsigned long old, unsigned long new) 62 + { 63 + unsigned long retval; 64 + unsigned long flags; 65 + 66 + local_irq_save(flags); 67 + retval = *m; 68 + if (retval == old) 69 + *m = new; 70 + local_irq_restore(flags); 71 + return retval; 72 + } 73 + 74 + #define cmpxchg(ptr, o, n) \ 75 + ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ 76 + (unsigned 
long)(o), \ 77 + (unsigned long)(n))) 78 + 79 + extern void __die(const char *, struct pt_regs *, const char *, 80 + const char *, unsigned long) __attribute__((noreturn)); 81 + extern void __die_if_kernel(const char *, struct pt_regs *, const char *, 82 + const char *, unsigned long); 83 + 84 + #define die(msg, regs) \ 85 + __die(msg, regs, __FILE__ ":", __func__, __LINE__) 86 + #define die_if_kernel(msg, regs) \ 87 + __die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__) 88 + 89 + #endif /* !__ASSEMBLY__ */ 90 + #endif /* _ASM_SCORE_SYSTEM_H */
+6
arch/score/include/asm/termbits.h
#ifndef _ASM_SCORE_TERMBITS_H
#define _ASM_SCORE_TERMBITS_H

/* No arch-specific termios bit definitions; use the generics. */
#include <asm-generic/termbits.h>

#endif /* _ASM_SCORE_TERMBITS_H */
+6
arch/score/include/asm/termios.h
#ifndef _ASM_SCORE_TERMIOS_H
#define _ASM_SCORE_TERMIOS_H

/* No arch-specific termios structure; use the generics. */
#include <asm-generic/termios.h>

#endif /* _ASM_SCORE_TERMIOS_H */
+103
arch/score/include/asm/thread_info.h
#ifndef _ASM_SCORE_THREAD_INFO_H
#define _ASM_SCORE_THREAD_INFO_H

#ifdef __KERNEL__

/* Kernel/user mode bit of the saved PSR, tested by entry.S. */
#define KU_MASK	0x08
#define KU_USER	0x08
#define KU_KERN	0x00

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 * - if the contents of this structure are changed, the assembly constants
 *   must also be changed
 */
struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	unsigned long		flags;		/* low level flags */
	unsigned long		tp_value;	/* thread pointer */
	__u32			cpu;		/* current CPU */

	/* 0 => preemptable, < 0 => BUG */
	int			preempt_count;

	/*
	 * thread address space:
	 * 0-0xBFFFFFFF for user-thread
	 * 0-0xFFFFFFFF for kernel-thread
	 */
	mm_segment_t		addr_limit;
	struct restart_block	restart_block;
	struct pt_regs		*regs;
};

/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.cpu		= 0,			\
	.preempt_count	= 1,			\
	.addr_limit	= KERNEL_DS,		\
	.restart_block	= {			\
		.fn = do_no_restart_syscall,	\
	},					\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* How to get the thread information struct from C: r28 is dedicated. */
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info()	__current_thread_info

/* thread information allocation: two pages per kernel stack */
#define THREAD_SIZE_ORDER	(1)
#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK		(THREAD_SIZE - 1UL)
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

#define alloc_thread_info(tsk)	kmalloc(THREAD_SIZE, GFP_KERNEL)
#define free_thread_info(info)	kfree(info)

#endif /* !__ASSEMBLY__ */

#define PREEMPT_ACTIVE	0x10000000

/*
 * thread information flags
 * - these are process state flags that various assembly files may need to
 *   access
 * - pending work-to-be-done flags are in LSW
 * - other flags in MSW
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling
					   TIF_NEED_RESCHED */
#define TIF_MEMDIE		18

#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)

/* work-to-do flags checked on return to user space */
#define _TIF_WORK_MASK		(0x0000ffff)

#endif /* __KERNEL__ */

#endif /* _ASM_SCORE_THREAD_INFO_H */
+8
arch/score/include/asm/timex.h
#ifndef _ASM_SCORE_TIMEX_H
#define _ASM_SCORE_TIMEX_H

/* Must be defined before including the generic header. */
#define CLOCK_TICK_RATE	27000000	/* Timer input freq. */

#include <asm-generic/timex.h>

#endif /* _ASM_SCORE_TIMEX_H */
+17
arch/score/include/asm/tlb.h
#ifndef _ASM_SCORE_TLB_H
#define _ASM_SCORE_TLB_H

/*
 * SCORE doesn't need any special per-pte or per-vma handling, except
 * we need to flush cache for area to be unmapped.
 */
#define tlb_start_vma(tlb, vma)			do {} while (0)
#define tlb_end_vma(tlb, vma)			do {} while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address)	do {} while (0)
/* mmu_gather teardown: drop the whole mm's TLB entries at once. */
#define tlb_flush(tlb)				flush_tlb_mm((tlb)->mm)

extern void score7_FTLB_refill_Handler(void);

#include <asm-generic/tlb.h>

#endif /* _ASM_SCORE_TLB_H */
+142
arch/score/include/asm/tlbflush.h
#ifndef _ASM_SCORE_TLBFLUSH_H
#define _ASM_SCORE_TLBFLUSH_H

#include <linux/mm.h>

/*
 * TLB flushing:
 *
 * - flush_tlb_all() flushes all processes TLB entries
 * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void local_flush_tlb_kernel_range(unsigned long start,
	unsigned long end);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
	unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);

/* Uniprocessor: the SMP flush interface maps straight to local flushes. */
#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_range(vma, vmaddr, end)	\
	local_flush_tlb_range(vma, vmaddr, end)
#define flush_tlb_kernel_range(vmaddr, end)	\
	local_flush_tlb_kernel_range(vmaddr, end)
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
#define flush_tlb_one(vaddr)		local_flush_tlb_one(vaddr)

#ifndef __ASSEMBLY__

/*
 * Accessors for the MMU control registers.  Judging by the names:
 * cr7 = TLBLOCK, cr8 = TLBPT, cr9 = PEADDR, cr11 = PEVN, cr12 = PECTX
 * (confirm against the S+core architecture manual).  The trailing nops
 * pad out the pipeline hazard after mtcr/mfcr.
 */
static inline unsigned long pevn_get(void)
{
	unsigned long val;

	__asm__ __volatile__(
		"mfcr %0, cr11\n"
		"nop\nnop\n"
		: "=r" (val));

	return val;
}

static inline void pevn_set(unsigned long val)
{
	__asm__ __volatile__(
		"mtcr %0, cr11\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (val));
}

static inline void pectx_set(unsigned long val)
{
	__asm__ __volatile__(
		"mtcr %0, cr12\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (val));
}

static inline unsigned long pectx_get(void)
{
	unsigned long val;
	__asm__ __volatile__(
		"mfcr %0, cr12\n"
		"nop\nnop\n"
		: "=r" (val));
	return val;
}

static inline unsigned long tlblock_get(void)
{
	unsigned long val;

	__asm__ __volatile__(
		"mfcr %0, cr7\n"
		"nop\nnop\n"
		: "=r" (val));
	return val;
}

static inline void tlblock_set(unsigned long val)
{
	__asm__ __volatile__(
		"mtcr %0, cr7\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (val));
}

static inline void tlbpt_set(unsigned long val)
{
	__asm__ __volatile__(
		"mtcr %0, cr8\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (val));
}

static inline long tlbpt_get(void)
{
	long val;

	__asm__ __volatile__(
		"mfcr %0, cr8\n"
		"nop\nnop\n"
		: "=r" (val));

	return val;
}

static inline void peaddr_set(unsigned long val)
{
	__asm__ __volatile__(
		"mtcr %0, cr9\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (val));
}

/* TLB operations. */
static inline void tlb_probe(void)
{
	__asm__ __volatile__("stlb;nop;nop;nop;nop;nop");
}

static inline void tlb_read(void)
{
	__asm__ __volatile__("mftlb;nop;nop;nop;nop;nop");
}

static inline void tlb_write_indexed(void)
{
	__asm__ __volatile__("mtptlb;nop;nop;nop;nop;nop");
}

static inline void tlb_write_random(void)
{
	__asm__ __volatile__("mtrtlb;nop;nop;nop;nop;nop");
}

#endif /* Not __ASSEMBLY__ */

#endif /* _ASM_SCORE_TLBFLUSH_H */
+6
arch/score/include/asm/topology.h
#ifndef _ASM_SCORE_TOPOLOGY_H
#define _ASM_SCORE_TOPOLOGY_H

/* score defines no architecture-specific CPU topology; use the
 * asm-generic defaults unchanged. */
#include <asm-generic/topology.h>

#endif /* _ASM_SCORE_TOPOLOGY_H */
+6
arch/score/include/asm/types.h
#ifndef _ASM_SCORE_TYPES_H
#define _ASM_SCORE_TYPES_H

/* score uses the asm-generic basic type definitions unchanged. */
#include <asm-generic/types.h>

#endif /* _ASM_SCORE_TYPES_H */
+27
arch/score/include/asm/uaccess.h
#ifndef _ASM_SCORE_UACCESS_H
#define _ASM_SCORE_UACCESS_H
/*
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
/* NOTE(review): these two declarations sit outside the __ASSEMBLY__
 * guard below — harmless only if no .S file includes this header. */
struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);

#ifndef __ASSEMBLY__

/*
 * __range_ok() is TRUE (non-zero) when the access is BAD: the range
 * [addr, addr+size) must lie entirely below 0x80000000 (presumably the
 * user/kernel address split on score — TODO confirm) to be acceptable.
 * The third clause catches addr+size wrap/overshoot past the boundary.
 */
#define __range_ok(addr, size) \
	((((unsigned long __force)(addr) >= 0x80000000) \
	|| ((unsigned long)(size) > 0x80000000) \
	|| (((unsigned long __force)(addr) + (unsigned long)(size)) > 0x80000000)))

/* __access_ok() inverts the sense: non-zero means the access is OK. */
#define __access_ok(addr, size) \
	(__range_ok((addr), (size)) == 0)

/* asm-generic/uaccess.h builds {get,put}_user etc. on top of
 * __access_ok() defined above. */
#include <asm-generic/uaccess.h>

#endif /* __ASSEMBLY__ */

#endif /* _ASM_SCORE_UACCESS_H */
+1
arch/score/include/asm/ucontext.h
··· 1 + #include <asm-generic/ucontext.h>
+6
arch/score/include/asm/unaligned.h
#ifndef _ASM_SCORE_UNALIGNED_H
#define _ASM_SCORE_UNALIGNED_H

/* score uses the asm-generic unaligned-access helpers unchanged. */
#include <asm-generic/unaligned.h>

#endif /* _ASM_SCORE_UNALIGNED_H */
+8
arch/score/include/asm/unistd.h
/*
 * Deliberately NOT a plain include guard: when __SYSCALL is defined
 * (syscall-table generation re-includes this header with __SYSCALL
 * redefined) the body must be expanded again even though the guard
 * macro is already set.
 */
#if !defined(_ASM_SCORE_UNISTD_H) || defined(__SYSCALL)
#define _ASM_SCORE_UNISTD_H

/* Tell asm-generic/unistd.h that this architecture has an MMU, so the
 * MMU-only syscalls (e.g. real fork) are wired up. */
#define __ARCH_HAVE_MMU

#include <asm-generic/unistd.h>

#endif /* _ASM_SCORE_UNISTD_H */
+4
arch/score/include/asm/user.h
#ifndef _ASM_SCORE_USER_H
#define _ASM_SCORE_USER_H

/* Intentionally empty: score provides no a.out-style "struct user"
 * core-dump layout. */

#endif /* _ASM_SCORE_USER_H */
+11
arch/score/kernel/Makefile
#
# Makefile for the Linux/SCORE kernel.
#

# head.o and the generated linker script are needed to link vmlinux but
# are not part of the obj-y object list (standard kbuild extra-y usage).
extra-y := head.o vmlinux.lds

obj-y += entry.o init_task.o irq.o process.o ptrace.o \
	   setup.o signal.o sys_score.o time.o traps.o \
	   sys_call_table.o

# module.o (ELF relocation support) is only needed for loadable modules.
obj-$(CONFIG_MODULES)	+= module.o
+216
arch/score/kernel/asm-offsets.c
/*
 * arch/score/kernel/asm-offsets.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/*
 * Generates asm-offsets.h: C structure offsets exported as assembler
 * constants via the kbuild OFFSET()/DEFINE() mechanism, for use by
 * entry.S and head.S.  These functions are never called at runtime;
 * the compiler's asm output is post-processed by Kbuild.
 */
#include <linux/kbuild.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/sched.h>

#include <asm-generic/cmpxchg-local.h>

/* Offsets into struct pt_regs (saved register frame). */
void output_ptreg_defines(void)
{
	COMMENT("SCORE pt_regs offsets.");
	OFFSET(PT_R0, pt_regs, regs[0]);
	OFFSET(PT_R1, pt_regs, regs[1]);
	OFFSET(PT_R2, pt_regs, regs[2]);
	OFFSET(PT_R3, pt_regs, regs[3]);
	OFFSET(PT_R4, pt_regs, regs[4]);
	OFFSET(PT_R5, pt_regs, regs[5]);
	OFFSET(PT_R6, pt_regs, regs[6]);
	OFFSET(PT_R7, pt_regs, regs[7]);
	OFFSET(PT_R8, pt_regs, regs[8]);
	OFFSET(PT_R9, pt_regs, regs[9]);
	OFFSET(PT_R10, pt_regs, regs[10]);
	OFFSET(PT_R11, pt_regs, regs[11]);
	OFFSET(PT_R12, pt_regs, regs[12]);
	OFFSET(PT_R13, pt_regs, regs[13]);
	OFFSET(PT_R14, pt_regs, regs[14]);
	OFFSET(PT_R15, pt_regs, regs[15]);
	OFFSET(PT_R16, pt_regs, regs[16]);
	OFFSET(PT_R17, pt_regs, regs[17]);
	OFFSET(PT_R18, pt_regs, regs[18]);
	OFFSET(PT_R19, pt_regs, regs[19]);
	OFFSET(PT_R20, pt_regs, regs[20]);
	OFFSET(PT_R21, pt_regs, regs[21]);
	OFFSET(PT_R22, pt_regs, regs[22]);
	OFFSET(PT_R23, pt_regs, regs[23]);
	OFFSET(PT_R24, pt_regs, regs[24]);
	OFFSET(PT_R25, pt_regs, regs[25]);
	OFFSET(PT_R26, pt_regs, regs[26]);
	OFFSET(PT_R27, pt_regs, regs[27]);
	OFFSET(PT_R28, pt_regs, regs[28]);
	OFFSET(PT_R29, pt_regs, regs[29]);
	OFFSET(PT_R30, pt_regs, regs[30]);
	OFFSET(PT_R31, pt_regs, regs[31]);

	OFFSET(PT_ORIG_R4, pt_regs, orig_r4);
	OFFSET(PT_ORIG_R7, pt_regs, orig_r7);
	OFFSET(PT_CEL, pt_regs, cel);
	OFFSET(PT_CEH, pt_regs, ceh);
	OFFSET(PT_SR0, pt_regs, sr0);
	OFFSET(PT_SR1, pt_regs, sr1);
	OFFSET(PT_SR2, pt_regs, sr2);
	OFFSET(PT_EPC, pt_regs, cp0_epc);
	OFFSET(PT_EMA, pt_regs, cp0_ema);
	OFFSET(PT_PSR, pt_regs, cp0_psr);
	OFFSET(PT_ECR, pt_regs, cp0_ecr);
	OFFSET(PT_CONDITION, pt_regs, cp0_condition);
	OFFSET(PT_IS_SYSCALL, pt_regs, is_syscall);

	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	BLANK();
}

/* Offsets into struct task_struct used by the context-switch code. */
void output_task_defines(void)
{
	COMMENT("SCORE task_struct offsets.");
	OFFSET(TASK_STATE, task_struct, state);
	OFFSET(TASK_THREAD_INFO, task_struct, stack);
	OFFSET(TASK_FLAGS, task_struct, flags);
	OFFSET(TASK_MM, task_struct, mm);
	OFFSET(TASK_PID, task_struct, pid);
	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
	BLANK();
}

/* Offsets into struct thread_info, plus kernel-stack geometry. */
void output_thread_info_defines(void)
{
	COMMENT("SCORE thread_info offsets.");
	OFFSET(TI_TASK, thread_info, task);
	OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
	OFFSET(TI_FLAGS, thread_info, flags);
	OFFSET(TI_TP_VALUE, thread_info, tp_value);
	OFFSET(TI_CPU, thread_info, cpu);
	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
	OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
	OFFSET(TI_REGS, thread_info, regs);
	DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
	DEFINE(KERNEL_STACK_MASK, THREAD_MASK);
	BLANK();
}

/* Offsets into thread_struct (callee-saved state saved by resume()). */
void output_thread_defines(void)
{
	COMMENT("SCORE specific thread_struct offsets.");
	OFFSET(THREAD_REG0, task_struct, thread.reg0);
	OFFSET(THREAD_REG2, task_struct, thread.reg2);
	OFFSET(THREAD_REG3, task_struct, thread.reg3);
	OFFSET(THREAD_REG12, task_struct, thread.reg12);
	OFFSET(THREAD_REG13, task_struct, thread.reg13);
	OFFSET(THREAD_REG14, task_struct, thread.reg14);
	OFFSET(THREAD_REG15, task_struct, thread.reg15);
	OFFSET(THREAD_REG16, task_struct, thread.reg16);
	OFFSET(THREAD_REG17, task_struct, thread.reg17);
	OFFSET(THREAD_REG18, task_struct, thread.reg18);
	OFFSET(THREAD_REG19, task_struct, thread.reg19);
	OFFSET(THREAD_REG20, task_struct, thread.reg20);
	OFFSET(THREAD_REG21, task_struct, thread.reg21);
	OFFSET(THREAD_REG29, task_struct, thread.reg29);

	OFFSET(THREAD_PSR, task_struct, thread.cp0_psr);
	OFFSET(THREAD_EMA, task_struct, thread.cp0_ema);
	OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr);
	OFFSET(THREAD_ECODE, task_struct, thread.error_code);
	OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
	BLANK();
}

/* mm_struct offsets and paging geometry constants. */
void output_mm_defines(void)
{
	COMMENT("Size of struct page");
	DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
	BLANK();
	COMMENT("Linux mm_struct offsets.");
	OFFSET(MM_USERS, mm_struct, mm_users);
	OFFSET(MM_PGD, mm_struct, pgd);
	OFFSET(MM_CONTEXT, mm_struct, context);
	BLANK();
	DEFINE(_PAGE_SIZE, PAGE_SIZE);
	DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
	BLANK();
	DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
	DEFINE(_PTE_T_SIZE, sizeof(pte_t));
	BLANK();
	DEFINE(_PGD_ORDER, PGD_ORDER);
	DEFINE(_PTE_ORDER, PTE_ORDER);
	BLANK();
	DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
	BLANK();
	DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
	DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
	BLANK();
}

/* Offsets into struct sigcontext for the signal-frame assembly. */
void output_sc_defines(void)
{
	COMMENT("Linux sigcontext offsets.");
	OFFSET(SC_REGS, sigcontext, sc_regs);
	OFFSET(SC_MDCEH, sigcontext, sc_mdceh);
	OFFSET(SC_MDCEL, sigcontext, sc_mdcel);
	OFFSET(SC_PC, sigcontext, sc_pc);
	OFFSET(SC_PSR, sigcontext, sc_psr);
	OFFSET(SC_ECR, sigcontext, sc_ecr);
	OFFSET(SC_EMA, sigcontext, sc_ema);
	BLANK();
}

/* Signal numbers exported as _SIGxxx assembler constants. */
void output_signal_defined(void)
{
	COMMENT("Linux signal numbers.");
	DEFINE(_SIGHUP, SIGHUP);
	DEFINE(_SIGINT, SIGINT);
	DEFINE(_SIGQUIT, SIGQUIT);
	DEFINE(_SIGILL, SIGILL);
	DEFINE(_SIGTRAP, SIGTRAP);
	DEFINE(_SIGIOT, SIGIOT);
	DEFINE(_SIGABRT, SIGABRT);
	DEFINE(_SIGFPE, SIGFPE);
	DEFINE(_SIGKILL, SIGKILL);
	DEFINE(_SIGBUS, SIGBUS);
	DEFINE(_SIGSEGV, SIGSEGV);
	DEFINE(_SIGSYS, SIGSYS);
	DEFINE(_SIGPIPE, SIGPIPE);
	DEFINE(_SIGALRM, SIGALRM);
	DEFINE(_SIGTERM, SIGTERM);
	DEFINE(_SIGUSR1, SIGUSR1);
	DEFINE(_SIGUSR2, SIGUSR2);
	DEFINE(_SIGCHLD, SIGCHLD);
	DEFINE(_SIGPWR, SIGPWR);
	DEFINE(_SIGWINCH, SIGWINCH);
	DEFINE(_SIGURG, SIGURG);
	DEFINE(_SIGIO, SIGIO);
	DEFINE(_SIGSTOP, SIGSTOP);
	DEFINE(_SIGTSTP, SIGTSTP);
	DEFINE(_SIGCONT, SIGCONT);
	DEFINE(_SIGTTIN, SIGTTIN);
	DEFINE(_SIGTTOU, SIGTTOU);
	DEFINE(_SIGVTALRM, SIGVTALRM);
	DEFINE(_SIGPROF, SIGPROF);
	DEFINE(_SIGXCPU, SIGXCPU);
	DEFINE(_SIGXFSZ, SIGXFSZ);
	BLANK();
}
+501
arch/score/kernel/entry.S
/*
 * arch/score/kernel/entry.S
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/*
 * Low-level exception, interrupt and system-call entry/exit paths.
 * Register conventions visible in this file: r0 = pt_regs frame pointer
 * after SAVE_ALL, r28 = current thread_info, r27 = syscall number.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asmmacro.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * disable interrupts: clear bit 0 of cr0 via shift-right/shift-left.
 * Clobbers r8; the trailing nops cover the mtcr latency (presumably a
 * pipeline hazard — confirm against the S+core manual).
 */
.macro	disable_irq
	mfcr	r8, cr0
	srli	r8, r8, 1
	slli	r8, r8, 1
	mtcr	r8, cr0
	nop
	nop
	nop
	nop
	nop
.endm

/*
 * enable interrupts: set bit 0 of cr0.  Clobbers r8.
 */
.macro	enable_irq
	mfcr	r8, cr0
	ori	r8, 1
	mtcr	r8, cr0
	nop
	nop
	nop
	nop
	nop
.endm

/* Exception vectors, copied/placed at fixed addresses by init code. */
__INIT
ENTRY(debug_exception_vector)
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!

ENTRY(general_exception_vector)	# should move to addr 0x200
	j	general_exception
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!

ENTRY(interrupt_exception_vector)	# should move to addr 0x210
	j	interrupt_exception
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!

	.section ".text", "ax"
	.align	2;
/* Dispatch through exception_handlers[] indexed by ecr.exc_code. */
general_exception:
	mfcr	r31, cr2
	nop
	la	r30, exception_handlers
	andi	r31, 0x1f		# get ecr.exc_code
	slli	r31, r31, 2
	add	r30, r30, r31
	lw	r30, [r30]
	br	r30

/* Save full frame, derive the IRQ number from cr2, tail-call do_IRQ
 * with ret_from_irq as the return address in r3. */
interrupt_exception:
	SAVE_ALL
	mfcr	r4, cr2
	nop
	lw	r16, [r28, TI_REGS]
	sw	r0, [r28, TI_REGS]
	la	r3, ret_from_irq
	srli	r4, r4, 18	# get ecr.ip[7:2], interrupt No.
	mv	r5, r0
	j	do_IRQ

/* Per-exception stubs: each saves the frame, calls its C handler with
 * the pt_regs pointer in r4, then joins ret_from_exception. */
ENTRY(handle_nmi)			# NMI #1
	SAVE_ALL
	mv	r4, r0
	la	r8, nmi_exception_handler
	brl	r8
	j	restore_all

ENTRY(handle_adelinsn)			# AdEL-instruction #2
	SAVE_ALL
	mfcr	r8, cr6
	nop
	nop
	sw	r8, [r0, PT_EMA]	# record faulting address from cr6
	mv	r4, r0
	la	r8, do_adelinsn
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_ibe)			# BusEL-instruction #5
	SAVE_ALL
	mv	r4, r0
	la	r8, do_be
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_pel)			# P-EL #6
	SAVE_ALL
	mv	r4, r0
	la	r8, do_pel
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_ccu)			# CCU #8
	SAVE_ALL
	mv	r4, r0
	la	r8, do_ccu
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_ri)			# RI #9
	SAVE_ALL
	mv	r4, r0
	la	r8, do_ri
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_tr)			# Trap #10
	SAVE_ALL
	mv	r4, r0
	la	r8, do_tr
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_adedata)			# AdES-instruction #12
	SAVE_ALL
	mfcr	r8, cr6
	nop
	nop
	sw	r8, [r0, PT_EMA]
	mv	r4, r0
	la	r8, do_adedata
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_cee)			# CeE #16
	SAVE_ALL
	mv	r4, r0
	la	r8, do_cee
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_cpe)			# CpE #17
	SAVE_ALL
	mv	r4, r0
	la	r8, do_cpe
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_dbe)			# BusEL-data #18
	SAVE_ALL
	mv	r4, r0
	la	r8, do_be
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

ENTRY(handle_reserved)			# others
	SAVE_ALL
	mv	r4, r0
	la	r8, do_reserved
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop

/* Without preemption, returning to kernel mode goes straight to
 * restore_all; with preemption, __ret_from_irq aliases
 * ret_from_exception and resume_kernel is a real label below. */
#ifndef CONFIG_PREEMPT
#define resume_kernel	restore_all
#else
#define __ret_from_irq	ret_from_exception
#endif

	.align	2
#ifndef CONFIG_PREEMPT
ENTRY(ret_from_exception)
	disable_irq			# preempt stop
	nop
	j	__ret_from_irq
	nop
#endif

ENTRY(ret_from_irq)
	sw	r16, [r28, TI_REGS]

ENTRY(__ret_from_irq)
	lw	r8, [r0, PT_PSR]	# returning to kernel mode?
	andri.c	r8, r8, KU_USER
	beq	resume_kernel

resume_userspace:
	disable_irq
	lw	r6, [r28, TI_FLAGS]	# current->work
	li	r8, _TIF_WORK_MASK
	and.c	r8, r8, r6	# ignoring syscall_trace
	bne	work_pending
	nop
	j	restore_all
	nop

#ifdef CONFIG_PREEMPT
resume_kernel:
	disable_irq
	lw	r8, [r28, TI_PRE_COUNT]
	cmpz.c	r8
	# NOTE(review): "bne r8, restore_all" carries a register operand
	# unlike the other bne uses after a .c compare — verify encoding.
	bne	r8, restore_all
need_resched:
	lw	r8, [r28, TI_FLAGS]
	andri.c	r9, r8, _TIF_NEED_RESCHED
	beq	restore_all
	lw	r8, [r28, PT_PSR]		# Interrupts off?
	andri.c	r8, r8, 1
	beq	restore_all
	bl	preempt_schedule_irq
	nop
	j	need_resched
	nop
#endif

/* New child resumes here; falls through into syscall_exit. */
ENTRY(ret_from_fork)
	bl	schedule_tail		# r4=struct task_struct *prev

ENTRY(syscall_exit)
	nop
	disable_irq
	lw	r6, [r28, TI_FLAGS]	# current->work
	li	r8, _TIF_WORK_MASK
	and.c	r8, r6, r8
	bne	syscall_exit_work

ENTRY(restore_all)			# restore full frame
	RESTORE_ALL_AND_RET

/* Pending-work slow path: reschedule and/or deliver signals. */
work_pending:
	andri.c	r8, r6, _TIF_NEED_RESCHED # r6 is preloaded with TI_FLAGS
	beq	work_notifysig
work_resched:
	bl	schedule
	nop
	disable_irq
	lw	r6, [r28, TI_FLAGS]
	li	r8, _TIF_WORK_MASK
	and.c	r8, r6, r8	# is there any work to be done
				# other than syscall tracing?
	beq	restore_all
	andri.c	r8, r6, _TIF_NEED_RESCHED
	bne	work_resched

work_notifysig:
	mv	r4, r0
	li	r5, 0
	bl	do_notify_resume	# r6 already loaded
	nop
	j	resume_userspace
	nop

ENTRY(syscall_exit_work)
	li	r8, _TIF_SYSCALL_TRACE
	and.c	r8, r8, r6		# r6 is preloaded with TI_FLAGS
	beq	work_pending	# trace bit set?
	nop
	enable_irq
	mv	r4, r0
	li	r5, 1
	bl	do_syscall_trace
	nop
	b	resume_userspace
	nop

/* Save/restore the callee-saved context into/from thread_struct
 * (\reg points at the task_struct; THREAD_* come from asm-offsets). */
.macro	save_context reg
	sw	r12, [\reg, THREAD_REG12];
	sw	r13, [\reg, THREAD_REG13];
	sw	r14, [\reg, THREAD_REG14];
	sw	r15, [\reg, THREAD_REG15];
	sw	r16, [\reg, THREAD_REG16];
	sw	r17, [\reg, THREAD_REG17];
	sw	r18, [\reg, THREAD_REG18];
	sw	r19, [\reg, THREAD_REG19];
	sw	r20, [\reg, THREAD_REG20];
	sw	r21, [\reg, THREAD_REG21];
	sw	r29, [\reg, THREAD_REG29];
	sw	r2, [\reg, THREAD_REG2];
	sw	r0, [\reg, THREAD_REG0]
.endm

.macro	restore_context	reg
	lw	r12, [\reg, THREAD_REG12];
	lw	r13, [\reg, THREAD_REG13];
	lw	r14, [\reg, THREAD_REG14];
	lw	r15, [\reg, THREAD_REG15];
	lw	r16, [\reg, THREAD_REG16];
	lw	r17, [\reg, THREAD_REG17];
	lw	r18, [\reg, THREAD_REG18];
	lw	r19, [\reg, THREAD_REG19];
	lw	r20, [\reg, THREAD_REG20];
	lw	r21, [\reg, THREAD_REG21];
	lw	r29, [\reg, THREAD_REG29];
	lw	r0, [\reg, THREAD_REG0];
	lw	r2, [\reg, THREAD_REG2];
	lw	r3, [\reg, THREAD_REG3]
.endm

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *		struct thread_info *next_ti)
 *
 * Context switch: saves prev's callee-saved state (including its cr0
 * PSR and return address r3), switches r28 to next_ti, restores next's
 * state, updates kernelsp to the top of next's kernel stack, and
 * merges next's saved PSR low bits into cr0 before branching to next's
 * saved r3.
 */
ENTRY(resume)
	mfcr	r9, cr0
	nop
	nop
	sw	r9, [r4, THREAD_PSR]
	save_context	r4
	sw	r3, [r4, THREAD_REG3]

	mv	r28, r6
	restore_context	r5
	mv	r8, r6
	addi	r8, KERNEL_STACK_SIZE
	subi	r8, 32
	la	r9, kernelsp;
	sw	r8, [r9];

	mfcr	r9, cr0
	ldis	r7, 0x00ff
	nop
	and	r9, r9, r7
	lw	r6, [r5, THREAD_PSR]
	not	r7, r7
	and	r6, r6, r7
	or	r6, r6, r9
	mtcr	r6, cr0
	nop; nop; nop; nop; nop
	br	r3

/*
 * System-call entry.  r27 holds the syscall number, r4-r7 the first
 * four arguments, user r8/r9 carry arguments 5 and 6 and are spilled
 * into the frame before interrupts are re-enabled.
 */
ENTRY(handle_sys)
	SAVE_ALL
	sw	r8, [r0, 16]		# argument 5 from user r8
	sw	r9, [r0, 20]		# argument 6 from user r9
	enable_irq

	sw	r4, [r0, PT_ORIG_R4]	#for restart syscall
	sw	r7, [r0, PT_ORIG_R7]	#for restart syscall
	sw	r27, [r0, PT_IS_SYSCALL] # it from syscall

	lw	r9, [r0, PT_EPC]	# skip syscall on return
	addi	r9, 4
	sw	r9, [r0, PT_EPC]

	cmpi.c	r27, __NR_syscalls	# check syscall number
	bgtu	illegal_syscall

	slli	r8, r27, 2		# get syscall routine
	la	r11, sys_call_table
	add	r11, r11, r8
	lw	r10, [r11]		# get syscall entry

	cmpz.c	r10
	beq	illegal_syscall

	lw	r8, [r28, TI_FLAGS]
	li	r9, _TIF_SYSCALL_TRACE
	and.c	r8, r8, r9
	bne	syscall_trace_entry

	brl	r10			# Do The Real system call

	# Convert the C return value: 0 in PT_R7 on success, 1 plus the
	# positive errno in PT_R4 on failure (-MAX_ERRNO..-1 range).
	cmpi.c	r4, 0
	blt	1f
	ldi	r8, 0
	sw	r8, [r0, PT_R7]
	b	2f
1:
	cmpi.c	r4, -MAX_ERRNO - 1
	ble	2f
	ldi	r8, 0x1;
	sw	r8, [r0, PT_R7]
	neg	r4, r4
2:
	sw	r4, [r0, PT_R4]		# save result

syscall_return:
	disable_irq
	lw	r6, [r28, TI_FLAGS]	# current->work
	li	r8, _TIF_WORK_MASK
	and.c	r8, r6, r8
	bne	syscall_return_work
	j	restore_all

syscall_return_work:
	j	syscall_exit_work

/* Traced syscall: notify the tracer, re-load the (possibly modified)
 * arguments from the frame, then invoke the saved handler (r16). */
syscall_trace_entry:
	mv	r16, r10
	mv	r4, r0
	li	r5, 0
	bl	do_syscall_trace

	mv	r8, r16
	lw	r4, [r0, PT_R4]		# Restore argument registers
	lw	r5, [r0, PT_R5]
	lw	r6, [r0, PT_R6]
	lw	r7, [r0, PT_R7]
	brl	r8

	# NOTE(review): unlike the non-traced path above, the error flag
	# and negation here are applied unconditionally after brl —
	# verify this matches the intended return-value convention.
	li	r8, -MAX_ERRNO - 1
	sw	r8, [r0, PT_R7]		# set error flag

	neg	r4, r4			# error
	sw	r4, [r0, PT_R0]		# set flag for syscall
					# restarting
1:	sw	r4, [r0, PT_R2]		# result
	j	syscall_exit

illegal_syscall:
	ldi	r4, -ENOSYS		# error
	sw	r4, [r0, PT_ORIG_R4]
	sw	r4, [r0, PT_R4]
	ldi	r9, 1			# set error flag
	sw	r9, [r0, PT_R7]
	j	syscall_return

/* Thin wrappers: pass the pt_regs pointer (r0) as first argument to
 * the C implementations in sys_score.c / signal.c. */
ENTRY(sys_execve)
	mv	r4, r0
	la	r8, score_execve
	br	r8

ENTRY(sys_clone)
	mv	r4, r0
	la	r8, score_clone
	br	r8

ENTRY(sys_rt_sigreturn)
	mv	r4, r0
	la	r8, score_rt_sigreturn
	br	r8

ENTRY(sys_sigaltstack)
	mv	r4, r0
	la	r8, score_sigaltstack
	br	r8
+70
arch/score/kernel/head.S
/*
 * arch/score/kernel/head.S
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/*
 * Kernel entry point: clears .bss, sets up the initial kernel stack
 * (init_thread_union) and kernelsp, enables the MMU via cr4, then
 * jumps to start_kernel with zeroed argument registers.
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm-offsets.h>

	.extern start_kernel
	.global	init_thread_union
	.global kernelsp

__INIT
ENTRY(_stext)
	la	r30, __bss_start	/* initialize BSS segment. */
	la	r31, _end
	xor	r8, r8, r8

1:	cmp.c	r31, r30
	beq	2f

	sw	r8, [r30]		/* clean memory. */
	addi	r30, 4
	b	1b

	/* r28 = thread_info, r0 = stack pointer just below the frame
	 * reserve at the top of the init stack. */
2:	la	r28, init_thread_union	/* set kernel stack. */
	mv	r0, r28
	addi	r0, KERNEL_STACK_SIZE - 32
	la	r30, kernelsp
	sw	r0, [r30]
	subi	r0, 4*4
	xor	r30, r30, r30
	ori	r30, 0x02		/* enable MMU. */
	mtcr	r30, cr4
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* there is no parameter */
	xor	r4, r4, r4
	xor	r5, r5, r5
	xor	r6, r6, r6
	xor	r7, r7, r7
	la	r30, start_kernel	/* jump to init_arch */
	br	r30
+47
arch/score/kernel/init_task.c
/*
 * arch/score/kernel/init_task.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/* Static definition of the boot task (PID 0) and its stack. */
#include <linux/init_task.h>
#include <linux/mqueue.h>

static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);
/* NOTE(review): EXPORT_SYMBOL relies on linux/module.h arriving via
 * the includes above — confirm, or include it explicitly. */
EXPORT_SYMBOL(init_task);
+148
arch/score/kernel/irq.c
/*
 * arch/score/kernel/irq.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>

#include <asm/io.h>

/* the interrupt controller is hardcoded at this address */
#define SCORE_PIC		((u32 __iomem __force *)0x95F50000)

/* Word offsets of the PIC registers from SCORE_PIC. */
#define INT_PNDL		0
#define INT_PNDH		1
#define INT_PRIORITY_M		2
#define INT_PRIORITY_SG0	4
#define INT_PRIORITY_SG1	5
#define INT_PRIORITY_SG2	6
#define INT_PRIORITY_SG3	7
#define INT_MASKL		8
#define INT_MASKH		9

/*
 * handles all normal device IRQs
 */
asmlinkage void do_IRQ(int irq)
{
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
}

/*
 * Mask (disable) an IRQ line by setting its bit in the 64-bit mask
 * register pair.  The hardware bit is 63 - irq_nr, i.e. the Linux IRQ
 * numbering is reversed relative to the PIC's.
 */
static void score_mask(unsigned int irq_nr)
{
	unsigned int irq_source = 63 - irq_nr;

	if (irq_source < 32)
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \
			(1 << irq_source)), SCORE_PIC + INT_MASKL);
	else
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) | \
			(1 << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}

/* Unmask (enable) an IRQ line: clear its bit in MASKL/MASKH. */
static void score_unmask(unsigned int irq_nr)
{
	unsigned int irq_source = 63 - irq_nr;

	if (irq_source < 32)
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \
			~(1 << irq_source)), SCORE_PIC + INT_MASKL);
	else
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) & \
			~(1 << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}

struct irq_chip score_irq_chip = {
	.name		= "Score7-level",
	.mask		= score_mask,
	.mask_ack	= score_mask,
	.unmask		= score_unmask,
};

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int index;
	unsigned long target_addr;

	for (index = 0; index < NR_IRQS; ++index)
		set_irq_chip_and_handler(index, &score_irq_chip,
					 handle_level_irq);

	/* Copy the interrupt vector stub to each hardware vector slot. */
	for (target_addr = IRQ_VECTOR_BASE_ADDR;
		target_addr <= IRQ_VECTOR_END_ADDR;
		target_addr += IRQ_VECTOR_SIZE)
		memcpy((void *)target_addr, \
			interrupt_exception_vector, IRQ_VECTOR_SIZE);

	/* Start with every interrupt source masked. */
	__raw_writel(0xffffffff, SCORE_PIC + INT_MASKL);
	__raw_writel(0xffffffff, SCORE_PIC + INT_MASKH);

	/* Point cr3 at the exception vector base (16-byte offset mode). */
	__asm__ __volatile__(
		"mtcr %0, cr3\n\t"
		: : "r" (EXCEPTION_VECTOR_BASE_ADDR | \
			VECTOR_ADDRESS_OFFSET_MODE16));
}

/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, cpu;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(cpu)
			seq_printf(p, "CPU%d       ", cpu);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		seq_printf(p, "%10u ", kstat_irqs(i));
		seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-");
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}

	return 0;
}
+165
arch/score/kernel/module.c
··· 1 + /* 2 + * arch/score/kernel/module.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/moduleloader.h> 27 + #include <linux/module.h> 28 + #include <linux/vmalloc.h> 29 + 30 + void *module_alloc(unsigned long size) 31 + { 32 + return size ? 
vmalloc(size) : NULL; 33 + } 34 + 35 + /* Free memory returned from module_alloc */ 36 + void module_free(struct module *mod, void *module_region) 37 + { 38 + vfree(module_region); 39 + } 40 + 41 + int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, 42 + char *secstrings, struct module *mod) 43 + { 44 + return 0; 45 + } 46 + 47 + int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, 48 + unsigned int symindex, unsigned int relindex, 49 + struct module *me) 50 + { 51 + Elf32_Shdr *symsec = sechdrs + symindex; 52 + Elf32_Shdr *relsec = sechdrs + relindex; 53 + Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; 54 + Elf32_Rel *rel = (void *)relsec->sh_addr; 55 + unsigned int i; 56 + 57 + for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) { 58 + unsigned long loc; 59 + Elf32_Sym *sym; 60 + s32 r_offset; 61 + 62 + r_offset = ELF32_R_SYM(rel->r_info); 63 + if ((r_offset < 0) || 64 + (r_offset > (symsec->sh_size / sizeof(Elf32_Sym)))) { 65 + printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n", 66 + me->name, relindex, i); 67 + return -ENOEXEC; 68 + } 69 + 70 + sym = ((Elf32_Sym *)symsec->sh_addr) + r_offset; 71 + 72 + if ((rel->r_offset < 0) || 73 + (rel->r_offset > dstsec->sh_size - sizeof(u32))) { 74 + printk(KERN_ERR "%s: out of bounds relocation, " 75 + "section %d reloc %d offset %d size %d\n", 76 + me->name, relindex, i, rel->r_offset, 77 + dstsec->sh_size); 78 + return -ENOEXEC; 79 + } 80 + 81 + loc = dstsec->sh_addr + rel->r_offset; 82 + switch (ELF32_R_TYPE(rel->r_info)) { 83 + case R_SCORE_NONE: 84 + break; 85 + case R_SCORE_ABS32: 86 + *(unsigned long *)loc += sym->st_value; 87 + break; 88 + case R_SCORE_HI16: 89 + break; 90 + case R_SCORE_LO16: { 91 + unsigned long hi16_offset, offset; 92 + unsigned long uvalue; 93 + unsigned long temp, temp_hi; 94 + temp_hi = *((unsigned long *)loc - 1); 95 + temp = *(unsigned long *)loc; 96 + 97 + hi16_offset = (((((temp_hi) >> 16) & 0x3) << 15) | 98 + ((temp_hi) & 0x7fff)) >> 1; 99 + 
offset = ((temp >> 16 & 0x03) << 15) | 100 + ((temp & 0x7fff) >> 1); 101 + offset = (hi16_offset << 16) | (offset & 0xffff); 102 + uvalue = sym->st_value + offset; 103 + hi16_offset = (uvalue >> 16) << 1; 104 + 105 + temp_hi = ((temp_hi) & (~(0x37fff))) | 106 + (hi16_offset & 0x7fff) | 107 + ((hi16_offset << 1) & 0x30000); 108 + *((unsigned long *)loc - 1) = temp_hi; 109 + 110 + offset = (uvalue & 0xffff) << 1; 111 + temp = (temp & (~(0x37fff))) | (offset & 0x7fff) | 112 + ((offset << 1) & 0x30000); 113 + *(unsigned long *)loc = temp; 114 + break; 115 + } 116 + case R_SCORE_24: { 117 + unsigned long hi16_offset, offset; 118 + unsigned long uvalue; 119 + unsigned long temp; 120 + 121 + temp = *(unsigned long *)loc; 122 + offset = (temp & 0x03FF7FFE); 123 + hi16_offset = (offset & 0xFFFF0000); 124 + offset = (hi16_offset | ((offset & 0xFFFF) << 1)) >> 2; 125 + 126 + uvalue = (sym->st_value + offset) >> 1; 127 + uvalue = uvalue & 0x00ffffff; 128 + 129 + temp = (temp & 0xfc008001) | 130 + ((uvalue << 2) & 0x3ff0000) | 131 + ((uvalue & 0x3fff) << 1); 132 + *(unsigned long *)loc = temp; 133 + break; 134 + } 135 + default: 136 + printk(KERN_ERR "%s: unknown relocation: %u\n", 137 + me->name, ELF32_R_TYPE(rel->r_info)); 138 + return -ENOEXEC; 139 + } 140 + } 141 + 142 + return 0; 143 + } 144 + 145 + int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, 146 + unsigned int symindex, unsigned int relsec, 147 + struct module *me) 148 + { 149 + return 0; 150 + } 151 + 152 + /* Given an address, look for it in the module exception tables. */ 153 + const struct exception_table_entry *search_module_dbetables(unsigned long addr) 154 + { 155 + return NULL; 156 + } 157 + 158 + /* Put in dbe list if necessary. */ 159 + int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, 160 + struct module *me) 161 + { 162 + return 0; 163 + } 164 + 165 + void module_arch_cleanup(struct module *mod) {}
+168
arch/score/kernel/process.c
··· 1 + /* 2 + * arch/score/kernel/process.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/module.h> 27 + #include <linux/reboot.h> 28 + #include <linux/elfcore.h> 29 + #include <linux/pm.h> 30 + 31 + void (*pm_power_off)(void); 32 + EXPORT_SYMBOL(pm_power_off); 33 + 34 + /* If or when software machine-restart is implemented, add code here. */ 35 + void machine_restart(char *command) {} 36 + 37 + /* If or when software machine-halt is implemented, add code here. */ 38 + void machine_halt(void) {} 39 + 40 + /* If or when software machine-power-off is implemented, add code here. */ 41 + void machine_power_off(void) {} 42 + 43 + /* 44 + * The idle thread. 
There's no useful work to be 45 + * done, so just try to conserve power and have a 46 + * low exit latency (ie sit in a loop waiting for 47 + * somebody to say that they'd like to reschedule) 48 + */ 49 + void __noreturn cpu_idle(void) 50 + { 51 + /* endless idle loop with no priority at all */ 52 + while (1) { 53 + while (!need_resched()) 54 + barrier(); 55 + 56 + preempt_enable_no_resched(); 57 + schedule(); 58 + preempt_disable(); 59 + } 60 + } 61 + 62 + void ret_from_fork(void); 63 + 64 + void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) 65 + { 66 + unsigned long status; 67 + 68 + /* New thread loses kernel privileges. */ 69 + status = regs->cp0_psr & ~(KU_MASK); 70 + status |= KU_USER; 71 + regs->cp0_psr = status; 72 + regs->cp0_epc = pc; 73 + regs->regs[0] = sp; 74 + } 75 + 76 + void exit_thread(void) {} 77 + 78 + /* 79 + * When a process does an "exec", machine state like FPU and debug 80 + * registers need to be reset. This is a hook function for that. 81 + * Currently we don't have any such state to reset, so this is empty. 
82 + */ 83 + void flush_thread(void) {} 84 + 85 + /* 86 + * set up the kernel stack and exception frames for a new process 87 + */ 88 + int copy_thread(unsigned long clone_flags, unsigned long usp, 89 + unsigned long unused, 90 + struct task_struct *p, struct pt_regs *regs) 91 + { 92 + struct thread_info *ti = task_thread_info(p); 93 + struct pt_regs *childregs = task_pt_regs(p); 94 + 95 + p->set_child_tid = NULL; 96 + p->clear_child_tid = NULL; 97 + 98 + *childregs = *regs; 99 + childregs->regs[7] = 0; /* Clear error flag */ 100 + childregs->regs[4] = 0; /* Child gets zero as return value */ 101 + regs->regs[4] = p->pid; 102 + 103 + if (childregs->cp0_psr & 0x8) { /* test kernel fork or user fork */ 104 + childregs->regs[0] = usp; /* user fork */ 105 + } else { 106 + childregs->regs[28] = (unsigned long) ti; /* kernel fork */ 107 + childregs->regs[0] = (unsigned long) childregs; 108 + } 109 + 110 + p->thread.reg0 = (unsigned long) childregs; 111 + p->thread.reg3 = (unsigned long) ret_from_fork; 112 + p->thread.cp0_psr = 0; 113 + 114 + return 0; 115 + } 116 + 117 + /* Fill in the fpu structure for a core dump. */ 118 + int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) 119 + { 120 + return 1; 121 + } 122 + 123 + static void __noreturn 124 + kernel_thread_helper(void *unused0, int (*fn)(void *), 125 + void *arg, void *unused1) 126 + { 127 + do_exit(fn(arg)); 128 + } 129 + 130 + /* 131 + * Create a kernel thread. 
132 + */ 133 + long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 134 + { 135 + struct pt_regs regs; 136 + 137 + memset(&regs, 0, sizeof(regs)); 138 + 139 + regs.regs[6] = (unsigned long) arg; 140 + regs.regs[5] = (unsigned long) fn; 141 + regs.cp0_epc = (unsigned long) kernel_thread_helper; 142 + regs.cp0_psr = (regs.cp0_psr & ~(0x1|0x4|0x8)) | \ 143 + ((regs.cp0_psr & 0x3) << 2); 144 + 145 + return do_fork(flags | CLONE_VM | CLONE_UNTRACED, \ 146 + 0, &regs, 0, NULL, NULL); 147 + } 148 + 149 + unsigned long thread_saved_pc(struct task_struct *tsk) 150 + { 151 + return task_pt_regs(tsk)->cp0_epc; 152 + } 153 + 154 + unsigned long get_wchan(struct task_struct *task) 155 + { 156 + if (!task || task == current || task->state == TASK_RUNNING) 157 + return 0; 158 + 159 + if (!task_stack_page(task)) 160 + return 0; 161 + 162 + return task_pt_regs(task)->cp0_epc; 163 + } 164 + 165 + unsigned long arch_align_stack(unsigned long sp) 166 + { 167 + return sp; 168 + }
+400
arch/score/kernel/ptrace.c
··· 1 + /* 2 + * arch/score/kernel/ptrace.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/kernel.h> 27 + #include <linux/ptrace.h> 28 + 29 + #include <asm/uaccess.h> 30 + 31 + static int is_16bitinsn(unsigned long insn) 32 + { 33 + if ((insn & INSN32_MASK) == INSN32_MASK) 34 + return 0; 35 + else 36 + return 1; 37 + } 38 + 39 + int 40 + read_tsk_long(struct task_struct *child, 41 + unsigned long addr, unsigned long *res) 42 + { 43 + int copied; 44 + 45 + copied = access_process_vm(child, addr, res, sizeof(*res), 0); 46 + 47 + return copied != sizeof(*res) ? -EIO : 0; 48 + } 49 + 50 + int 51 + read_tsk_short(struct task_struct *child, 52 + unsigned long addr, unsigned short *res) 53 + { 54 + int copied; 55 + 56 + copied = access_process_vm(child, addr, res, sizeof(*res), 0); 57 + 58 + return copied != sizeof(*res) ? 
-EIO : 0; 59 + } 60 + 61 + static int 62 + write_tsk_short(struct task_struct *child, 63 + unsigned long addr, unsigned short val) 64 + { 65 + int copied; 66 + 67 + copied = access_process_vm(child, addr, &val, sizeof(val), 1); 68 + 69 + return copied != sizeof(val) ? -EIO : 0; 70 + } 71 + 72 + static int 73 + write_tsk_long(struct task_struct *child, 74 + unsigned long addr, unsigned long val) 75 + { 76 + int copied; 77 + 78 + copied = access_process_vm(child, addr, &val, sizeof(val), 1); 79 + 80 + return copied != sizeof(val) ? -EIO : 0; 81 + } 82 + 83 + /* 84 + * Get all user integer registers. 85 + */ 86 + static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) 87 + { 88 + struct pt_regs *regs = task_pt_regs(tsk); 89 + 90 + return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; 91 + } 92 + 93 + /* 94 + * Set all user integer registers. 95 + */ 96 + static int ptrace_setregs(struct task_struct *tsk, void __user *uregs) 97 + { 98 + struct pt_regs newregs; 99 + int ret; 100 + 101 + ret = -EFAULT; 102 + if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { 103 + struct pt_regs *regs = task_pt_regs(tsk); 104 + *regs = newregs; 105 + ret = 0; 106 + } 107 + 108 + return ret; 109 + } 110 + 111 + void user_enable_single_step(struct task_struct *child) 112 + { 113 + /* far_epc is the target of branch */ 114 + unsigned int epc, far_epc = 0; 115 + unsigned long epc_insn, far_epc_insn; 116 + int ninsn_type; /* next insn type 0=16b, 1=32b */ 117 + unsigned int tmp, tmp2; 118 + struct pt_regs *regs = task_pt_regs(child); 119 + child->thread.single_step = 1; 120 + child->thread.ss_nextcnt = 1; 121 + epc = regs->cp0_epc; 122 + 123 + read_tsk_long(child, epc, &epc_insn); 124 + 125 + if (is_16bitinsn(epc_insn)) { 126 + if ((epc_insn & J16M) == J16) { 127 + tmp = epc_insn & 0xFFE; 128 + epc = (epc & 0xFFFFF000) | tmp; 129 + } else if ((epc_insn & B16M) == B16) { 130 + child->thread.ss_nextcnt = 2; 131 + tmp = (epc_insn & 0xFF) << 1; 
132 + tmp = tmp << 23; 133 + tmp = (unsigned int)((int) tmp >> 23); 134 + far_epc = epc + tmp; 135 + epc += 2; 136 + } else if ((epc_insn & BR16M) == BR16) { 137 + child->thread.ss_nextcnt = 2; 138 + tmp = (epc_insn >> 4) & 0xF; 139 + far_epc = regs->regs[tmp]; 140 + epc += 2; 141 + } else 142 + epc += 2; 143 + } else { 144 + if ((epc_insn & J32M) == J32) { 145 + tmp = epc_insn & 0x03FFFFFE; 146 + tmp2 = tmp & 0x7FFF; 147 + tmp = (((tmp >> 16) & 0x3FF) << 15) | tmp2; 148 + epc = (epc & 0xFFC00000) | tmp; 149 + } else if ((epc_insn & B32M) == B32) { 150 + child->thread.ss_nextcnt = 2; 151 + tmp = epc_insn & 0x03FFFFFE; /* discard LK bit */ 152 + tmp2 = tmp & 0x3FF; 153 + tmp = (((tmp >> 16) & 0x3FF) << 10) | tmp2; /* 20bit */ 154 + tmp = tmp << 12; 155 + tmp = (unsigned int)((int) tmp >> 12); 156 + far_epc = epc + tmp; 157 + epc += 4; 158 + } else if ((epc_insn & BR32M) == BR32) { 159 + child->thread.ss_nextcnt = 2; 160 + tmp = (epc_insn >> 16) & 0x1F; 161 + far_epc = regs->regs[tmp]; 162 + epc += 4; 163 + } else 164 + epc += 4; 165 + } 166 + 167 + if (child->thread.ss_nextcnt == 1) { 168 + read_tsk_long(child, epc, &epc_insn); 169 + 170 + if (is_16bitinsn(epc_insn)) { 171 + write_tsk_short(child, epc, SINGLESTEP16_INSN); 172 + ninsn_type = 0; 173 + } else { 174 + write_tsk_long(child, epc, SINGLESTEP32_INSN); 175 + ninsn_type = 1; 176 + } 177 + 178 + if (ninsn_type == 0) { /* 16bits */ 179 + child->thread.insn1_type = 0; 180 + child->thread.addr1 = epc; 181 + /* the insn may have 32bit data */ 182 + child->thread.insn1 = (short)epc_insn; 183 + } else { 184 + child->thread.insn1_type = 1; 185 + child->thread.addr1 = epc; 186 + child->thread.insn1 = epc_insn; 187 + } 188 + } else { 189 + /* branch! 
have two target child->thread.ss_nextcnt=2 */ 190 + read_tsk_long(child, epc, &epc_insn); 191 + read_tsk_long(child, far_epc, &far_epc_insn); 192 + if (is_16bitinsn(epc_insn)) { 193 + write_tsk_short(child, epc, SINGLESTEP16_INSN); 194 + ninsn_type = 0; 195 + } else { 196 + write_tsk_long(child, epc, SINGLESTEP32_INSN); 197 + ninsn_type = 1; 198 + } 199 + 200 + if (ninsn_type == 0) { /* 16bits */ 201 + child->thread.insn1_type = 0; 202 + child->thread.addr1 = epc; 203 + /* the insn may have 32bit data */ 204 + child->thread.insn1 = (short)epc_insn; 205 + } else { 206 + child->thread.insn1_type = 1; 207 + child->thread.addr1 = epc; 208 + child->thread.insn1 = epc_insn; 209 + } 210 + 211 + if (is_16bitinsn(far_epc_insn)) { 212 + write_tsk_short(child, far_epc, SINGLESTEP16_INSN); 213 + ninsn_type = 0; 214 + } else { 215 + write_tsk_long(child, far_epc, SINGLESTEP32_INSN); 216 + ninsn_type = 1; 217 + } 218 + 219 + if (ninsn_type == 0) { /* 16bits */ 220 + child->thread.insn2_type = 0; 221 + child->thread.addr2 = far_epc; 222 + /* the insn may have 32bit data */ 223 + child->thread.insn2 = (short)far_epc_insn; 224 + } else { 225 + child->thread.insn2_type = 1; 226 + child->thread.addr2 = far_epc; 227 + child->thread.insn2 = far_epc_insn; 228 + } 229 + } 230 + } 231 + 232 + void user_disable_single_step(struct task_struct *child) 233 + { 234 + if (child->thread.insn1_type == 0) 235 + write_tsk_short(child, child->thread.addr1, 236 + child->thread.insn1); 237 + 238 + if (child->thread.insn1_type == 1) 239 + write_tsk_long(child, child->thread.addr1, 240 + child->thread.insn1); 241 + 242 + if (child->thread.ss_nextcnt == 2) { /* branch */ 243 + if (child->thread.insn1_type == 0) 244 + write_tsk_short(child, child->thread.addr1, 245 + child->thread.insn1); 246 + if (child->thread.insn1_type == 1) 247 + write_tsk_long(child, child->thread.addr1, 248 + child->thread.insn1); 249 + if (child->thread.insn2_type == 0) 250 + write_tsk_short(child, child->thread.addr2, 251 + 
child->thread.insn2); 252 + if (child->thread.insn2_type == 1) 253 + write_tsk_long(child, child->thread.addr2, 254 + child->thread.insn2); 255 + } 256 + 257 + child->thread.single_step = 0; 258 + child->thread.ss_nextcnt = 0; 259 + } 260 + 261 + void ptrace_disable(struct task_struct *child) 262 + { 263 + user_disable_single_step(child); 264 + } 265 + 266 + long 267 + arch_ptrace(struct task_struct *child, long request, long addr, long data) 268 + { 269 + int ret; 270 + unsigned long __user *datap = (void __user *)data; 271 + 272 + switch (request) { 273 + /* Read the word at location addr in the USER area. */ 274 + case PTRACE_PEEKUSR: { 275 + struct pt_regs *regs; 276 + unsigned long tmp; 277 + 278 + regs = task_pt_regs(child); 279 + 280 + tmp = 0; /* Default return value. */ 281 + switch (addr) { 282 + case 0 ... 31: 283 + tmp = regs->regs[addr]; 284 + break; 285 + case PC: 286 + tmp = regs->cp0_epc; 287 + break; 288 + case ECR: 289 + tmp = regs->cp0_ecr; 290 + break; 291 + case EMA: 292 + tmp = regs->cp0_ema; 293 + break; 294 + case CEH: 295 + tmp = regs->ceh; 296 + break; 297 + case CEL: 298 + tmp = regs->cel; 299 + break; 300 + case CONDITION: 301 + tmp = regs->cp0_condition; 302 + break; 303 + case PSR: 304 + tmp = regs->cp0_psr; 305 + break; 306 + case COUNTER: 307 + tmp = regs->sr0; 308 + break; 309 + case LDCR: 310 + tmp = regs->sr1; 311 + break; 312 + case STCR: 313 + tmp = regs->sr2; 314 + break; 315 + default: 316 + tmp = 0; 317 + return -EIO; 318 + } 319 + 320 + ret = put_user(tmp, (unsigned int __user *) datap); 321 + return ret; 322 + } 323 + 324 + case PTRACE_POKEUSR: { 325 + struct pt_regs *regs; 326 + ret = 0; 327 + regs = task_pt_regs(child); 328 + 329 + switch (addr) { 330 + case 0 ... 
31: 331 + regs->regs[addr] = data; 332 + break; 333 + case PC: 334 + regs->cp0_epc = data; 335 + break; 336 + case CEH: 337 + regs->ceh = data; 338 + break; 339 + case CEL: 340 + regs->cel = data; 341 + break; 342 + case CONDITION: 343 + regs->cp0_condition = data; 344 + break; 345 + case PSR: 346 + case COUNTER: 347 + case STCR: 348 + case LDCR: 349 + break; /* user can't write the reg */ 350 + default: 351 + /* The rest are not allowed. */ 352 + ret = -EIO; 353 + break; 354 + } 355 + break; 356 + } 357 + 358 + case PTRACE_GETREGS: 359 + ret = ptrace_getregs(child, (void __user *)datap); 360 + break; 361 + 362 + case PTRACE_SETREGS: 363 + ret = ptrace_setregs(child, (void __user *)datap); 364 + break; 365 + 366 + default: 367 + ret = ptrace_request(child, request, addr, data); 368 + break; 369 + } 370 + 371 + return ret; 372 + } 373 + 374 + /* 375 + * Notification of system call entry/exit 376 + * - triggered by current->work.syscall_trace 377 + */ 378 + asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) 379 + { 380 + if (!(current->ptrace & PT_PTRACED)) 381 + return; 382 + 383 + if (!test_thread_flag(TIF_SYSCALL_TRACE)) 384 + return; 385 + 386 + /* The 0x80 provides a way for the tracing parent to distinguish 387 + between a syscall stop and SIGTRAP delivery. */ 388 + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 389 + 0x80 : 0)); 390 + 391 + /* 392 + * this isn't the same as continuing with a signal, but it will do 393 + * for normal use. strace only continues with a signal if the 394 + * stopping signal is not SIGTRAP. -brl 395 + */ 396 + if (current->exit_code) { 397 + send_sig(current->exit_code, current, 1); 398 + current->exit_code = 0; 399 + } 400 + }
+157
arch/score/kernel/setup.c
··· 1 + /* 2 + * arch/score/kernel/setup.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/bootmem.h> 27 + #include <linux/initrd.h> 28 + #include <linux/ioport.h> 29 + #include <linux/seq_file.h> 30 + #include <linux/screen_info.h> 31 + 32 + #include <asm-generic/sections.h> 33 + 34 + struct screen_info screen_info; 35 + unsigned long kernelsp; 36 + 37 + static char command_line[COMMAND_LINE_SIZE]; 38 + static struct resource code_resource = { .name = "Kernel code",}; 39 + static struct resource data_resource = { .name = "Kernel data",}; 40 + 41 + static void __init bootmem_init(void) 42 + { 43 + unsigned long reserved_end, bootmap_size; 44 + unsigned long size = initrd_end - initrd_start; 45 + 46 + reserved_end = (unsigned long)_end; 47 + 48 + min_low_pfn = 0; 49 + max_low_pfn = MEM_SIZE / PAGE_SIZE; 50 + 51 + /* Initialize the boot-time allocator with low memory only. 
*/ 52 + bootmap_size = init_bootmem_node(NODE_DATA(0), reserved_end, 53 + min_low_pfn, max_low_pfn); 54 + add_active_range(0, min_low_pfn, max_low_pfn); 55 + 56 + free_bootmem(PFN_PHYS(reserved_end), 57 + (max_low_pfn - reserved_end) << PAGE_SHIFT); 58 + memory_present(0, reserved_end, max_low_pfn); 59 + 60 + /* Reserve space for the bootmem bitmap. */ 61 + reserve_bootmem(PFN_PHYS(reserved_end), bootmap_size, BOOTMEM_DEFAULT); 62 + 63 + if (size == 0) { 64 + printk(KERN_INFO "Initrd not found or empty"); 65 + goto disable; 66 + } 67 + 68 + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { 69 + printk(KERN_ERR "Initrd extends beyond end of memory"); 70 + goto disable; 71 + } 72 + 73 + /* Reserve space for the initrd bitmap. */ 74 + reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT); 75 + initrd_below_start_ok = 1; 76 + 77 + pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", 78 + initrd_start, size); 79 + return; 80 + disable: 81 + printk(KERN_CONT " - disabling initrd\n"); 82 + initrd_start = 0; 83 + initrd_end = 0; 84 + } 85 + 86 + static void __init resource_init(void) 87 + { 88 + struct resource *res; 89 + 90 + code_resource.start = (unsigned long)_text; 91 + code_resource.end = (unsigned long)_etext - 1; 92 + data_resource.start = (unsigned long)_etext; 93 + data_resource.end = (unsigned long)_edata - 1; 94 + 95 + res = alloc_bootmem(sizeof(struct resource)); 96 + res->name = "System RAM"; 97 + res->start = 0; 98 + res->end = MEM_SIZE - 1; 99 + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 100 + request_resource(&iomem_resource, res); 101 + 102 + request_resource(res, &code_resource); 103 + request_resource(res, &data_resource); 104 + } 105 + 106 + void __init setup_arch(char **cmdline_p) 107 + { 108 + randomize_va_space = 0; 109 + *cmdline_p = command_line; 110 + 111 + cpu_cache_init(); 112 + tlb_init(); 113 + bootmem_init(); 114 + paging_init(); 115 + resource_init(); 116 + } 117 + 118 + static int show_cpuinfo(struct seq_file *m, void *v) 119 + { 120 
+ unsigned long n = (unsigned long) v - 1; 121 + 122 + seq_printf(m, "processor\t\t: %ld\n", n); 123 + seq_printf(m, "\n"); 124 + 125 + return 0; 126 + } 127 + 128 + static void *c_start(struct seq_file *m, loff_t *pos) 129 + { 130 + unsigned long i = *pos; 131 + 132 + return i < 1 ? (void *) (i + 1) : NULL; 133 + } 134 + 135 + static void *c_next(struct seq_file *m, void *v, loff_t *pos) 136 + { 137 + ++*pos; 138 + return c_start(m, pos); 139 + } 140 + 141 + static void c_stop(struct seq_file *m, void *v) 142 + { 143 + } 144 + 145 + const struct seq_operations cpuinfo_op = { 146 + .start = c_start, 147 + .next = c_next, 148 + .stop = c_stop, 149 + .show = show_cpuinfo, 150 + }; 151 + 152 + static int __init topology_init(void) 153 + { 154 + return 0; 155 + } 156 + 157 + subsys_initcall(topology_init);
+361
arch/score/kernel/signal.c
··· 1 + /* 2 + * arch/score/kernel/signal.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/errno.h> 27 + #include <linux/signal.h> 28 + #include <linux/unistd.h> 29 + #include <linux/uaccess.h> 30 + 31 + #include <asm/syscalls.h> 32 + #include <asm/ucontext.h> 33 + 34 + #include <asm/cacheflush.h> 35 + 36 + #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 37 + 38 + struct rt_sigframe { 39 + u32 rs_ass[4]; /* argument save space */ 40 + u32 rs_code[2]; /* signal trampoline */ 41 + struct siginfo rs_info; 42 + struct ucontext rs_uc; 43 + }; 44 + 45 + static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) 46 + { 47 + int err = 0; 48 + unsigned long reg; 49 + 50 + reg = regs->cp0_epc; err |= __put_user(reg, &sc->sc_pc); 51 + err |= __put_user(regs->cp0_psr, &sc->sc_psr); 52 + err |= __put_user(regs->cp0_condition, &sc->sc_condition); 53 + 54 + 55 + #define save_gp_reg(i) { \ 56 + reg = regs->regs[i]; \ 57 + err |= __put_user(reg, &sc->sc_regs[i]); \ 58 + } while (0) 59 + 
save_gp_reg(0); save_gp_reg(1); save_gp_reg(2); 60 + save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); 61 + save_gp_reg(6); save_gp_reg(7); save_gp_reg(8); 62 + save_gp_reg(9); save_gp_reg(10); save_gp_reg(11); 63 + save_gp_reg(12); save_gp_reg(13); save_gp_reg(14); 64 + save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); 65 + save_gp_reg(18); save_gp_reg(19); save_gp_reg(20); 66 + save_gp_reg(21); save_gp_reg(22); save_gp_reg(23); 67 + save_gp_reg(24); save_gp_reg(25); save_gp_reg(26); 68 + save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); 69 + #undef save_gp_reg 70 + 71 + reg = regs->ceh; err |= __put_user(reg, &sc->sc_mdceh); 72 + reg = regs->cel; err |= __put_user(reg, &sc->sc_mdcel); 73 + err |= __put_user(regs->cp0_ecr, &sc->sc_ecr); 74 + err |= __put_user(regs->cp0_ema, &sc->sc_ema); 75 + 76 + return err; 77 + } 78 + 79 + static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) 80 + { 81 + int err = 0; 82 + u32 reg; 83 + 84 + err |= __get_user(regs->cp0_epc, &sc->sc_pc); 85 + err |= __get_user(regs->cp0_condition, &sc->sc_condition); 86 + 87 + err |= __get_user(reg, &sc->sc_mdceh); 88 + regs->ceh = (int) reg; 89 + err |= __get_user(reg, &sc->sc_mdcel); 90 + regs->cel = (int) reg; 91 + 92 + err |= __get_user(reg, &sc->sc_psr); 93 + regs->cp0_psr = (int) reg; 94 + err |= __get_user(reg, &sc->sc_ecr); 95 + regs->cp0_ecr = (int) reg; 96 + err |= __get_user(reg, &sc->sc_ema); 97 + regs->cp0_ema = (int) reg; 98 + 99 + #define restore_gp_reg(i) do { \ 100 + err |= __get_user(reg, &sc->sc_regs[i]); \ 101 + regs->regs[i] = reg; \ 102 + } while (0) 103 + restore_gp_reg(0); restore_gp_reg(1); restore_gp_reg(2); 104 + restore_gp_reg(3); restore_gp_reg(4); restore_gp_reg(5); 105 + restore_gp_reg(6); restore_gp_reg(7); restore_gp_reg(8); 106 + restore_gp_reg(9); restore_gp_reg(10); restore_gp_reg(11); 107 + restore_gp_reg(12); restore_gp_reg(13); restore_gp_reg(14); 108 + restore_gp_reg(15); restore_gp_reg(16); restore_gp_reg(17); 109 + 
restore_gp_reg(18); restore_gp_reg(19); restore_gp_reg(20); 110 + restore_gp_reg(21); restore_gp_reg(22); restore_gp_reg(23); 111 + restore_gp_reg(24); restore_gp_reg(25); restore_gp_reg(26); 112 + restore_gp_reg(27); restore_gp_reg(28); restore_gp_reg(29); 113 + #undef restore_gp_reg 114 + 115 + return err; 116 + } 117 + 118 + /* 119 + * Determine which stack to use.. 120 + */ 121 + static void __user *get_sigframe(struct k_sigaction *ka, 122 + struct pt_regs *regs, size_t frame_size) 123 + { 124 + unsigned long sp; 125 + 126 + /* Default to using normal stack */ 127 + sp = regs->regs[0]; 128 + sp -= 32; 129 + 130 + /* This is the X/Open sanctioned signal stack switching. */ 131 + if ((ka->sa.sa_flags & SA_ONSTACK) && (!on_sig_stack(sp))) 132 + sp = current->sas_ss_sp + current->sas_ss_size; 133 + 134 + return (void __user*)((sp - frame_size) & ~7); 135 + } 136 + 137 + asmlinkage long 138 + score_sigaltstack(struct pt_regs *regs) 139 + { 140 + const stack_t __user *uss = (const stack_t __user *) regs->regs[4]; 141 + stack_t __user *uoss = (stack_t __user *) regs->regs[5]; 142 + unsigned long usp = regs->regs[0]; 143 + 144 + return do_sigaltstack(uss, uoss, usp); 145 + } 146 + 147 + asmlinkage long 148 + score_rt_sigreturn(struct pt_regs *regs) 149 + { 150 + struct rt_sigframe __user *frame; 151 + sigset_t set; 152 + stack_t st; 153 + int sig; 154 + 155 + frame = (struct rt_sigframe __user *) regs->regs[0]; 156 + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 157 + goto badframe; 158 + if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) 159 + goto badframe; 160 + 161 + sigdelsetmask(&set, ~_BLOCKABLE); 162 + spin_lock_irq(&current->sighand->siglock); 163 + current->blocked = set; 164 + recalc_sigpending(); 165 + spin_unlock_irq(&current->sighand->siglock); 166 + 167 + sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); 168 + if (sig < 0) 169 + goto badframe; 170 + else if (sig) 171 + force_sig(sig, current); 172 + 173 + if 
(__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) 174 + goto badframe; 175 + 176 + /* It is more difficult to avoid calling this function than to 177 + call it and ignore errors. */ 178 + do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]); 179 + 180 + __asm__ __volatile__( 181 + "mv\tr0, %0\n\t" 182 + "la\tr8, syscall_exit\n\t" 183 + "br\tr8\n\t" 184 + : : "r" (regs) : "r8"); 185 + 186 + badframe: 187 + force_sig(SIGSEGV, current); 188 + 189 + return 0; 190 + } 191 + 192 + static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, 193 + int signr, sigset_t *set, siginfo_t *info) 194 + { 195 + struct rt_sigframe __user *frame; 196 + int err = 0; 197 + 198 + frame = get_sigframe(ka, regs, sizeof(*frame)); 199 + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 200 + goto give_sigsegv; 201 + 202 + /* 203 + * Set up the return code ... 204 + * 205 + * li v0, __NR_rt_sigreturn 206 + * syscall 207 + */ 208 + err |= __put_user(0x87788000 + __NR_rt_sigreturn*2, 209 + frame->rs_code + 0); 210 + err |= __put_user(0x80008002, frame->rs_code + 1); 211 + flush_cache_sigtramp((unsigned long) frame->rs_code); 212 + 213 + err |= copy_siginfo_to_user(&frame->rs_info, info); 214 + err |= __put_user(0, &frame->rs_uc.uc_flags); 215 + err |= __put_user(NULL, &frame->rs_uc.uc_link); 216 + err |= __put_user((void __user *)current->sas_ss_sp, 217 + &frame->rs_uc.uc_stack.ss_sp); 218 + err |= __put_user(sas_ss_flags(regs->regs[0]), 219 + &frame->rs_uc.uc_stack.ss_flags); 220 + err |= __put_user(current->sas_ss_size, 221 + &frame->rs_uc.uc_stack.ss_size); 222 + err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); 223 + err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); 224 + 225 + if (err) 226 + goto give_sigsegv; 227 + 228 + regs->regs[0] = (unsigned long) frame; 229 + regs->regs[3] = (unsigned long) frame->rs_code; 230 + regs->regs[4] = signr; 231 + regs->regs[5] = (unsigned long) &frame->rs_info; 232 + regs->regs[6] = (unsigned 
long) &frame->rs_uc; 233 + regs->regs[29] = (unsigned long) ka->sa.sa_handler; 234 + regs->cp0_epc = (unsigned long) ka->sa.sa_handler; 235 + 236 + return 0; 237 + 238 + give_sigsegv: 239 + if (signr == SIGSEGV) 240 + ka->sa.sa_handler = SIG_DFL; 241 + force_sig(SIGSEGV, current); 242 + return -EFAULT; 243 + } 244 + 245 + static int handle_signal(unsigned long sig, siginfo_t *info, 246 + struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) 247 + { 248 + int ret; 249 + 250 + if (regs->is_syscall) { 251 + switch (regs->regs[4]) { 252 + case ERESTART_RESTARTBLOCK: 253 + case ERESTARTNOHAND: 254 + regs->regs[4] = EINTR; 255 + break; 256 + case ERESTARTSYS: 257 + if (!(ka->sa.sa_flags & SA_RESTART)) { 258 + regs->regs[4] = EINTR; 259 + break; 260 + } 261 + case ERESTARTNOINTR: 262 + regs->regs[4] = regs->orig_r4; 263 + regs->regs[7] = regs->orig_r7; 264 + regs->cp0_epc -= 8; 265 + } 266 + 267 + regs->is_syscall = 0; 268 + } 269 + 270 + /* 271 + * Set up the stack frame 272 + */ 273 + ret = setup_rt_frame(ka, regs, sig, oldset, info); 274 + 275 + spin_lock_irq(&current->sighand->siglock); 276 + sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 277 + if (!(ka->sa.sa_flags & SA_NODEFER)) 278 + sigaddset(&current->blocked, sig); 279 + recalc_sigpending(); 280 + spin_unlock_irq(&current->sighand->siglock); 281 + 282 + return ret; 283 + } 284 + 285 + static void do_signal(struct pt_regs *regs) 286 + { 287 + struct k_sigaction ka; 288 + sigset_t *oldset; 289 + siginfo_t info; 290 + int signr; 291 + 292 + /* 293 + * We want the common case to go fast, which is why we may in certain 294 + * cases get here from kernel mode. Just return without doing anything 295 + * if so. 
296 + */ 297 + if (!user_mode(regs)) 298 + return; 299 + 300 + if (test_thread_flag(TIF_RESTORE_SIGMASK)) 301 + oldset = &current->saved_sigmask; 302 + else 303 + oldset = &current->blocked; 304 + 305 + signr = get_signal_to_deliver(&info, &ka, regs, NULL); 306 + if (signr > 0) { 307 + /* Actually deliver the signal. */ 308 + if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 309 + /* 310 + * A signal was successfully delivered; the saved 311 + * sigmask will have been stored in the signal frame, 312 + * and will be restored by sigreturn, so we can simply 313 + * clear the TIF_RESTORE_SIGMASK flag. 314 + */ 315 + if (test_thread_flag(TIF_RESTORE_SIGMASK)) 316 + clear_thread_flag(TIF_RESTORE_SIGMASK); 317 + } 318 + 319 + return; 320 + } 321 + 322 + if (regs->is_syscall) { 323 + if (regs->regs[4] == ERESTARTNOHAND || 324 + regs->regs[4] == ERESTARTSYS || 325 + regs->regs[4] == ERESTARTNOINTR) { 326 + regs->regs[4] = regs->orig_r4; 327 + regs->regs[7] = regs->orig_r7; 328 + regs->cp0_epc -= 8; 329 + } 330 + 331 + if (regs->regs[4] == ERESTART_RESTARTBLOCK) { 332 + regs->regs[27] = __NR_restart_syscall; 333 + regs->regs[4] = regs->orig_r4; 334 + regs->regs[7] = regs->orig_r7; 335 + regs->cp0_epc -= 8; 336 + } 337 + 338 + regs->is_syscall = 0; /* Don't deal with this again. */ 339 + } 340 + 341 + /* 342 + * If there's no signal to deliver, we just put the saved sigmask 343 + * back 344 + */ 345 + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 346 + clear_thread_flag(TIF_RESTORE_SIGMASK); 347 + sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 348 + } 349 + } 350 + 351 + /* 352 + * notification of userspace execution resumption 353 + * - triggered by the TIF_WORK_MASK flags 354 + */ 355 + asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, 356 + __u32 thread_info_flags) 357 + { 358 + /* deal with pending signal delivery */ 359 + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 360 + do_signal(regs); 361 + }
+12
arch/score/kernel/sys_call_table.c
··· 1 + #include <linux/syscalls.h> 2 + #include <linux/signal.h> 3 + #include <linux/unistd.h> 4 + 5 + #include <asm/syscalls.h> 6 + 7 + #undef __SYSCALL 8 + #define __SYSCALL(nr, call) [nr] = (call), 9 + 10 + void *sys_call_table[__NR_syscalls] = { 11 + #include <asm/unistd.h> 12 + };
+131
arch/score/kernel/sys_score.c
··· 1 + /* 2 + * arch/score/kernel/syscall.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/file.h> 27 + #include <linux/fs.h> 28 + #include <linux/mman.h> 29 + #include <linux/module.h> 30 + #include <linux/unistd.h> 31 + #include <linux/syscalls.h> 32 + #include <asm/syscalls.h> 33 + 34 + asmlinkage long 35 + sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 36 + unsigned long flags, unsigned long fd, unsigned long pgoff) 37 + { 38 + int error = -EBADF; 39 + struct file *file = NULL; 40 + 41 + if (pgoff & (~PAGE_MASK >> 12)) 42 + return -EINVAL; 43 + 44 + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 45 + if (!(flags & MAP_ANONYMOUS)) { 46 + file = fget(fd); 47 + if (!file) 48 + return error; 49 + } 50 + 51 + down_write(&current->mm->mmap_sem); 52 + error = do_mmap_pgoff(file, addr, len, prot, flags, 53 + pgoff >> (PAGE_SHIFT - 12)); 54 + up_write(&current->mm->mmap_sem); 55 + 56 + if (file) 57 + fput(file); 58 + 59 + return error; 60 + } 61 + 62 + /* 63 + * Clone a task - this clones the 
calling program thread. 64 + * This is called indirectly via a small wrapper 65 + */ 66 + asmlinkage long 67 + score_clone(struct pt_regs *regs) 68 + { 69 + unsigned long clone_flags; 70 + unsigned long newsp; 71 + int __user *parent_tidptr, *child_tidptr; 72 + 73 + clone_flags = regs->regs[4]; 74 + newsp = regs->regs[5]; 75 + if (!newsp) 76 + newsp = regs->regs[0]; 77 + parent_tidptr = (int __user *)regs->regs[6]; 78 + child_tidptr = (int __user *)regs->regs[8]; 79 + 80 + return do_fork(clone_flags, newsp, regs, 0, 81 + parent_tidptr, child_tidptr); 82 + } 83 + 84 + /* 85 + * sys_execve() executes a new program. 86 + * This is called indirectly via a small wrapper 87 + */ 88 + asmlinkage long 89 + score_execve(struct pt_regs *regs) 90 + { 91 + int error; 92 + char *filename; 93 + 94 + filename = getname((char __user*)regs->regs[4]); 95 + error = PTR_ERR(filename); 96 + if (IS_ERR(filename)) 97 + return error; 98 + 99 + error = do_execve(filename, (char __user *__user*)regs->regs[5], 100 + (char __user *__user *) regs->regs[6], regs); 101 + 102 + putname(filename); 103 + return error; 104 + } 105 + 106 + /* 107 + * Do a system call from kernel instead of calling sys_execve so we 108 + * end up with proper pt_regs. 109 + */ 110 + int kernel_execve(const char *filename, char *const argv[], char *const envp[]) 111 + { 112 + register unsigned long __r4 asm("r4") = (unsigned long) filename; 113 + register unsigned long __r5 asm("r5") = (unsigned long) argv; 114 + register unsigned long __r6 asm("r6") = (unsigned long) envp; 115 + register unsigned long __r7 asm("r7"); 116 + 117 + __asm__ __volatile__ (" \n" 118 + "ldi r27, %5 \n" 119 + "syscall \n" 120 + "mv %0, r4 \n" 121 + "mv %1, r7 \n" 122 + : "=&r" (__r4), "=r" (__r7) 123 + : "r" (__r4), "r" (__r5), "r" (__r6), "i" (__NR_execve) 124 + : "r8", "r9", "r10", "r11", "r22", "r23", "r24", "r25", 125 + "r26", "r27", "memory"); 126 + 127 + if (__r7 == 0) 128 + return __r4; 129 + 130 + return -__r4; 131 + }
+99
arch/score/kernel/time.c
··· 1 + /* 2 + * arch/score/kernel/time.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/clockchips.h> 27 + #include <linux/interrupt.h> 28 + 29 + #include <asm/scoreregs.h> 30 + 31 + static irqreturn_t timer_interrupt(int irq, void *dev_id) 32 + { 33 + struct clock_event_device *evdev = dev_id; 34 + 35 + /* clear timer interrupt flag */ 36 + outl(1, P_TIMER0_CPP_REG); 37 + evdev->event_handler(evdev); 38 + 39 + return IRQ_HANDLED; 40 + } 41 + 42 + static struct irqaction timer_irq = { 43 + .handler = timer_interrupt, 44 + .flags = IRQF_DISABLED | IRQF_TIMER, 45 + .name = "timer", 46 + }; 47 + 48 + static int score_timer_set_next_event(unsigned long delta, 49 + struct clock_event_device *evdev) 50 + { 51 + outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); 52 + outl(delta, P_TIMER0_PRELOAD); 53 + outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); 54 + 55 + return 0; 56 + } 57 + 58 + static void score_timer_set_mode(enum clock_event_mode mode, 59 + struct clock_event_device *evdev) 60 + { 61 + switch 
(mode) { 62 + case CLOCK_EVT_MODE_PERIODIC: 63 + outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); 64 + outl(SYSTEM_CLOCK/HZ, P_TIMER0_PRELOAD); 65 + outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); 66 + break; 67 + case CLOCK_EVT_MODE_ONESHOT: 68 + case CLOCK_EVT_MODE_SHUTDOWN: 69 + case CLOCK_EVT_MODE_RESUME: 70 + case CLOCK_EVT_MODE_UNUSED: 71 + break; 72 + default: 73 + BUG(); 74 + } 75 + } 76 + 77 + static struct clock_event_device score_clockevent = { 78 + .name = "score_clockevent", 79 + .features = CLOCK_EVT_FEAT_PERIODIC, 80 + .shift = 16, 81 + .set_next_event = score_timer_set_next_event, 82 + .set_mode = score_timer_set_mode, 83 + }; 84 + 85 + void __init time_init(void) 86 + { 87 + timer_irq.dev_id = &score_clockevent; 88 + setup_irq(IRQ_TIMER , &timer_irq); 89 + 90 + /* setup COMPARE clockevent */ 91 + score_clockevent.mult = div_sc(SYSTEM_CLOCK, NSEC_PER_SEC, 92 + score_clockevent.shift); 93 + score_clockevent.max_delta_ns = clockevent_delta2ns((u32)~0, 94 + &score_clockevent); 95 + score_clockevent.min_delta_ns = clockevent_delta2ns(50, 96 + &score_clockevent) + 1; 97 + score_clockevent.cpumask = cpumask_of(0); 98 + clockevents_register_device(&score_clockevent); 99 + }
+349
arch/score/kernel/traps.c
··· 1 + /* 2 + * arch/score/kernel/traps.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/module.h> 27 + #include <linux/sched.h> 28 + 29 + #include <asm/cacheflush.h> 30 + #include <asm/irq.h> 31 + #include <asm/irq_regs.h> 32 + 33 + unsigned long exception_handlers[32]; 34 + 35 + /* 36 + * The architecture-independent show_stack generator 37 + */ 38 + void show_stack(struct task_struct *task, unsigned long *sp) 39 + { 40 + int i; 41 + long stackdata; 42 + 43 + sp = sp ? 
sp : (unsigned long *)&sp; 44 + 45 + printk(KERN_NOTICE "Stack: "); 46 + i = 1; 47 + while ((long) sp & (PAGE_SIZE - 1)) { 48 + if (i && ((i % 8) == 0)) 49 + printk(KERN_NOTICE "\n"); 50 + if (i > 40) { 51 + printk(KERN_NOTICE " ..."); 52 + break; 53 + } 54 + 55 + if (__get_user(stackdata, sp++)) { 56 + printk(KERN_NOTICE " (Bad stack address)"); 57 + break; 58 + } 59 + 60 + printk(KERN_NOTICE " %08lx", stackdata); 61 + i++; 62 + } 63 + printk(KERN_NOTICE "\n"); 64 + } 65 + 66 + static void show_trace(long *sp) 67 + { 68 + int i; 69 + long addr; 70 + 71 + sp = sp ? sp : (long *) &sp; 72 + 73 + printk(KERN_NOTICE "Call Trace: "); 74 + i = 1; 75 + while ((long) sp & (PAGE_SIZE - 1)) { 76 + if (__get_user(addr, sp++)) { 77 + if (i && ((i % 6) == 0)) 78 + printk(KERN_NOTICE "\n"); 79 + printk(KERN_NOTICE " (Bad stack address)\n"); 80 + break; 81 + } 82 + 83 + if (kernel_text_address(addr)) { 84 + if (i && ((i % 6) == 0)) 85 + printk(KERN_NOTICE "\n"); 86 + if (i > 40) { 87 + printk(KERN_NOTICE " ..."); 88 + break; 89 + } 90 + 91 + printk(KERN_NOTICE " [<%08lx>]", addr); 92 + i++; 93 + } 94 + } 95 + printk(KERN_NOTICE "\n"); 96 + } 97 + 98 + static void show_code(unsigned int *pc) 99 + { 100 + long i; 101 + 102 + printk(KERN_NOTICE "\nCode:"); 103 + 104 + for (i = -3; i < 6; i++) { 105 + unsigned long insn; 106 + if (__get_user(insn, pc + i)) { 107 + printk(KERN_NOTICE " (Bad address in epc)\n"); 108 + break; 109 + } 110 + printk(KERN_NOTICE "%c%08lx%c", (i ? ' ' : '<'), 111 + insn, (i ? ' ' : '>')); 112 + } 113 + } 114 + 115 + /* 116 + * FIXME: really the generic show_regs should take a const pointer argument. 
117 + */ 118 + void show_regs(struct pt_regs *regs) 119 + { 120 + printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 121 + regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3], 122 + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); 123 + printk("r8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 124 + regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11], 125 + regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]); 126 + printk("r16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 127 + regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19], 128 + regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]); 129 + printk("r24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 130 + regs->regs[24], regs->regs[25], regs->regs[26], regs->regs[27], 131 + regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]); 132 + 133 + printk("CEH : %08lx\n", regs->ceh); 134 + printk("CEL : %08lx\n", regs->cel); 135 + 136 + printk("EMA:%08lx, epc:%08lx %s\nPSR: %08lx\nECR:%08lx\nCondition : %08lx\n", 137 + regs->cp0_ema, regs->cp0_epc, print_tainted(), regs->cp0_psr, 138 + regs->cp0_ecr, regs->cp0_condition); 139 + } 140 + 141 + static void show_registers(struct pt_regs *regs) 142 + { 143 + show_regs(regs); 144 + printk(KERN_NOTICE "Process %s (pid: %d, stackpage=%08lx)\n", 145 + current->comm, current->pid, (unsigned long) current); 146 + show_stack(current_thread_info()->task, (long *) regs->regs[0]); 147 + show_trace((long *) regs->regs[0]); 148 + show_code((unsigned int *) regs->cp0_epc); 149 + printk(KERN_NOTICE "\n"); 150 + } 151 + 152 + /* 153 + * The architecture-independent dump_stack generator 154 + */ 155 + void dump_stack(void) 156 + { 157 + show_stack(current_thread_info()->task, 158 + (long *) get_irq_regs()->regs[0]); 159 + } 160 + EXPORT_SYMBOL(dump_stack); 161 + 162 + void __die(const char *str, struct pt_regs *regs, const char *file, 163 + const char *func, unsigned long line) 164 + { 
165 + console_verbose(); 166 + printk("%s", str); 167 + if (file && func) 168 + printk(" in %s:%s, line %ld", file, func, line); 169 + printk(":\n"); 170 + show_registers(regs); 171 + do_exit(SIGSEGV); 172 + } 173 + 174 + void __die_if_kernel(const char *str, struct pt_regs *regs, 175 + const char *file, const char *func, unsigned long line) 176 + { 177 + if (!user_mode(regs)) 178 + __die(str, regs, file, func, line); 179 + } 180 + 181 + asmlinkage void do_adelinsn(struct pt_regs *regs) 182 + { 183 + printk("do_ADE-linsn:ema:0x%08lx:epc:0x%08lx\n", 184 + regs->cp0_ema, regs->cp0_epc); 185 + die_if_kernel("do_ade execution Exception\n", regs); 186 + force_sig(SIGBUS, current); 187 + } 188 + 189 + asmlinkage void do_adedata(struct pt_regs *regs) 190 + { 191 + const struct exception_table_entry *fixup; 192 + fixup = search_exception_tables(regs->cp0_epc); 193 + if (fixup) { 194 + regs->cp0_epc = fixup->fixup; 195 + return; 196 + } 197 + printk("do_ADE-data:ema:0x%08lx:epc:0x%08lx\n", 198 + regs->cp0_ema, regs->cp0_epc); 199 + die_if_kernel("do_ade execution Exception\n", regs); 200 + force_sig(SIGBUS, current); 201 + } 202 + 203 + asmlinkage void do_pel(struct pt_regs *regs) 204 + { 205 + die_if_kernel("do_pel execution Exception", regs); 206 + force_sig(SIGFPE, current); 207 + } 208 + 209 + asmlinkage void do_cee(struct pt_regs *regs) 210 + { 211 + die_if_kernel("do_cee execution Exception", regs); 212 + force_sig(SIGFPE, current); 213 + } 214 + 215 + asmlinkage void do_cpe(struct pt_regs *regs) 216 + { 217 + die_if_kernel("do_cpe execution Exception", regs); 218 + force_sig(SIGFPE, current); 219 + } 220 + 221 + asmlinkage void do_be(struct pt_regs *regs) 222 + { 223 + die_if_kernel("do_be execution Exception", regs); 224 + force_sig(SIGBUS, current); 225 + } 226 + 227 + asmlinkage void do_ov(struct pt_regs *regs) 228 + { 229 + siginfo_t info; 230 + 231 + die_if_kernel("do_ov execution Exception", regs); 232 + 233 + info.si_code = FPE_INTOVF; 234 + info.si_signo = 
SIGFPE; 235 + info.si_errno = 0; 236 + info.si_addr = (void *)regs->cp0_epc; 237 + force_sig_info(SIGFPE, &info, current); 238 + } 239 + 240 + asmlinkage void do_tr(struct pt_regs *regs) 241 + { 242 + die_if_kernel("do_tr execution Exception", regs); 243 + force_sig(SIGTRAP, current); 244 + } 245 + 246 + asmlinkage void do_ri(struct pt_regs *regs) 247 + { 248 + unsigned long epc_insn; 249 + unsigned long epc = regs->cp0_epc; 250 + 251 + read_tsk_long(current, epc, &epc_insn); 252 + if (current->thread.single_step == 1) { 253 + if ((epc == current->thread.addr1) || 254 + (epc == current->thread.addr2)) { 255 + user_disable_single_step(current); 256 + force_sig(SIGTRAP, current); 257 + return; 258 + } else 259 + BUG(); 260 + } else if ((epc_insn == BREAKPOINT32_INSN) || 261 + ((epc_insn & 0x0000FFFF) == 0x7002) || 262 + ((epc_insn & 0xFFFF0000) == 0x70020000)) { 263 + force_sig(SIGTRAP, current); 264 + return; 265 + } else { 266 + die_if_kernel("do_ri execution Exception", regs); 267 + force_sig(SIGILL, current); 268 + } 269 + } 270 + 271 + asmlinkage void do_ccu(struct pt_regs *regs) 272 + { 273 + die_if_kernel("do_ccu execution Exception", regs); 274 + force_sig(SIGILL, current); 275 + } 276 + 277 + asmlinkage void do_reserved(struct pt_regs *regs) 278 + { 279 + /* 280 + * Game over - no way to handle this if it ever occurs. Most probably 281 + * caused by a new unknown cpu type or after another deadly 282 + * hard/software error. 283 + */ 284 + die_if_kernel("do_reserved execution Exception", regs); 285 + show_regs(regs); 286 + panic("Caught reserved exception - should not happen."); 287 + } 288 + 289 + /* 290 + * NMI exception handler. 
291 + */ 292 + void nmi_exception_handler(struct pt_regs *regs) 293 + { 294 + die_if_kernel("nmi_exception_handler execution Exception", regs); 295 + die("NMI", regs); 296 + } 297 + 298 + /* Install CPU exception handler */ 299 + void *set_except_vector(int n, void *addr) 300 + { 301 + unsigned long handler = (unsigned long) addr; 302 + unsigned long old_handler = exception_handlers[n]; 303 + 304 + exception_handlers[n] = handler; 305 + return (void *)old_handler; 306 + } 307 + 308 + void __init trap_init(void) 309 + { 310 + int i; 311 + 312 + pgd_current = (unsigned long)init_mm.pgd; 313 + /* DEBUG EXCEPTION */ 314 + memcpy((void *)DEBUG_VECTOR_BASE_ADDR, 315 + &debug_exception_vector, DEBUG_VECTOR_SIZE); 316 + /* NMI EXCEPTION */ 317 + memcpy((void *)GENERAL_VECTOR_BASE_ADDR, 318 + &general_exception_vector, GENERAL_VECTOR_SIZE); 319 + 320 + /* 321 + * Initialise exception handlers 322 + */ 323 + for (i = 0; i <= 31; i++) 324 + set_except_vector(i, handle_reserved); 325 + 326 + set_except_vector(1, handle_nmi); 327 + set_except_vector(2, handle_adelinsn); 328 + set_except_vector(3, handle_tlb_refill); 329 + set_except_vector(4, handle_tlb_invaild); 330 + set_except_vector(5, handle_ibe); 331 + set_except_vector(6, handle_pel); 332 + set_except_vector(7, handle_sys); 333 + set_except_vector(8, handle_ccu); 334 + set_except_vector(9, handle_ri); 335 + set_except_vector(10, handle_tr); 336 + set_except_vector(11, handle_adedata); 337 + set_except_vector(12, handle_adedata); 338 + set_except_vector(13, handle_tlb_refill); 339 + set_except_vector(14, handle_tlb_invaild); 340 + set_except_vector(15, handle_mod); 341 + set_except_vector(16, handle_cee); 342 + set_except_vector(17, handle_cpe); 343 + set_except_vector(18, handle_dbe); 344 + flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR); 345 + 346 + atomic_inc(&init_mm.mm_count); 347 + current->active_mm = &init_mm; 348 + cpu_cache_init(); 349 + }
+148
arch/score/kernel/vmlinux.lds.S
··· 1 + /* 2 + * arch/score/kernel/vmlinux.lds.S 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Chen Liqin <liqin.chen@sunplusct.com> 8 + * Lennox Wu <lennox.wu@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <asm-generic/vmlinux.lds.h> 27 + 28 + OUTPUT_ARCH(score) 29 + ENTRY(_stext) 30 + 31 + jiffies = jiffies_64; 32 + 33 + SECTIONS 34 + { 35 + . = CONFIG_MEMORY_START + 0x2000; 36 + /* read-only */ 37 + .text : { 38 + _text = .; /* Text and read-only data */ 39 + TEXT_TEXT 40 + SCHED_TEXT 41 + LOCK_TEXT 42 + KPROBES_TEXT 43 + *(.text.*) 44 + *(.fixup) 45 + . = ALIGN (4) ; 46 + _etext = .; /* End of text section */ 47 + } 48 + 49 + . = ALIGN(16); 50 + RODATA 51 + 52 + /* Exception table */ 53 + . 
= ALIGN(16); 54 + __ex_table : { 55 + __start___ex_table = .; 56 + *(__ex_table) 57 + __stop___ex_table = .; 58 + } 59 + 60 + /* writeable */ 61 + .data ALIGN (4096): { 62 + *(.data.init_task) 63 + 64 + DATA_DATA 65 + CONSTRUCTORS 66 + } 67 + 68 + /* We want the small data sections together, so single-instruction offsets 69 + can access them all, and initialized data all before uninitialized, so 70 + we can shorten the on-disk segment size. */ 71 + . = ALIGN(8); 72 + .sdata : { 73 + *(.sdata) 74 + } 75 + 76 + . = ALIGN(32); 77 + .data.cacheline_aligned : { 78 + *(.data.cacheline_aligned) 79 + } 80 + _edata = .; /* End of data section */ 81 + 82 + /* will be freed after init */ 83 + . = ALIGN(4096); /* Init code and data */ 84 + __init_begin = .; 85 + 86 + . = ALIGN(4096); 87 + .init.text : { 88 + _sinittext = .; 89 + INIT_TEXT 90 + _einittext = .; 91 + } 92 + .init.data : { 93 + INIT_DATA 94 + } 95 + . = ALIGN(16); 96 + .init.setup : { 97 + __setup_start = .; 98 + *(.init.setup) 99 + __setup_end = .; 100 + } 101 + 102 + .initcall.init : { 103 + __initcall_start = .; 104 + INITCALLS 105 + __initcall_end = .; 106 + } 107 + 108 + .con_initcall.init : { 109 + __con_initcall_start = .; 110 + *(.con_initcall.init) 111 + __con_initcall_end = .; 112 + } 113 + SECURITY_INIT 114 + 115 + /* .exit.text is discarded at runtime, not link time, to deal with 116 + * references from .rodata 117 + */ 118 + .exit.text : { 119 + EXIT_TEXT 120 + } 121 + .exit.data : { 122 + EXIT_DATA 123 + } 124 + #if defined(CONFIG_BLK_DEV_INITRD) 125 + .init.ramfs ALIGN(4096): { 126 + __initramfs_start = .; 127 + *(.init.ramfs) 128 + __initramfs_end = .; 129 + . = ALIGN(4); 130 + LONG(0); 131 + } 132 + #endif 133 + . = ALIGN(4096); 134 + __init_end = .; 135 + /* freed after init ends here */ 136 + 137 + __bss_start = .; /* BSS */ 138 + .sbss : { 139 + *(.sbss) 140 + *(.scommon) 141 + } 142 + .bss : { 143 + *(.bss) 144 + *(COMMON) 145 + } 146 + __bss_stop = .; 147 + _end = .; 148 + }
+8
arch/score/lib/Makefile
··· 1 + # 2 + # Makefile for SCORE-specific library files.. 3 + # 4 + 5 + lib-y += string.o checksum.o checksum_copy.o 6 + 7 + # libgcc-style stuff needed in the kernel 8 + obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o
+46
arch/score/lib/ashldi3.c
··· 1 + /* 2 + * arch/score/lib/ashldi3.c 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, see the file COPYING, or write 16 + * to the Free Software Foundation, Inc., 17 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 + */ 19 + 20 + #include <linux/module.h> 21 + #include "libgcc.h" 22 + 23 + long long __ashldi3(long long u, word_type b) 24 + { 25 + DWunion uu, w; 26 + word_type bm; 27 + 28 + if (b == 0) 29 + return u; 30 + 31 + uu.ll = u; 32 + bm = 32 - b; 33 + 34 + if (bm <= 0) { 35 + w.s.low = 0; 36 + w.s.high = (unsigned int) uu.s.low << -bm; 37 + } else { 38 + const unsigned int carries = (unsigned int) uu.s.low >> bm; 39 + 40 + w.s.low = (unsigned int) uu.s.low << b; 41 + w.s.high = ((unsigned int) uu.s.high << b) | carries; 42 + } 43 + 44 + return w.ll; 45 + } 46 + EXPORT_SYMBOL(__ashldi3);
+48
arch/score/lib/ashrdi3.c
··· 1 + /* 2 + * arch/score/lib/ashrdi3.c 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, see the file COPYING, or write 16 + * to the Free Software Foundation, Inc., 17 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 + */ 19 + 20 + #include <linux/module.h> 21 + #include "libgcc.h" 22 + 23 + long long __ashrdi3(long long u, word_type b) 24 + { 25 + DWunion uu, w; 26 + word_type bm; 27 + 28 + if (b == 0) 29 + return u; 30 + 31 + uu.ll = u; 32 + bm = 32 - b; 33 + 34 + if (bm <= 0) { 35 + /* w.s.high = 1..1 or 0..0 */ 36 + w.s.high = 37 + uu.s.high >> 31; 38 + w.s.low = uu.s.high >> -bm; 39 + } else { 40 + const unsigned int carries = (unsigned int) uu.s.high << bm; 41 + 42 + w.s.high = uu.s.high >> b; 43 + w.s.low = ((unsigned int) uu.s.low >> b) | carries; 44 + } 45 + 46 + return w.ll; 47 + } 48 + EXPORT_SYMBOL(__ashrdi3);
+255
arch/score/lib/checksum.S
/*
 * arch/score/lib/csum_partial.S
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/linkage.h>

/*
 * Add 'reg' into the running checksum 'sum' with end-around carry:
 * if the addition wrapped (sum < reg afterwards), fold the carry
 * back in by adding 1.  This is the core one's-complement step.
 */
#define ADDC(sum,reg)			\
	add	sum, sum, reg;		\
	cmp.c	reg, sum;		\
	bleu	9f;			\
	addi	sum, 0x1;		\
9:

/* Checksum one 32-byte chunk at [src + offset] into 'sum'. */
#define CSUM_BIGCHUNK(src, offset, sum)	\
	lw	r8, [src, offset + 0x00];	\
	lw	r9, [src, offset + 0x04];	\
	lw	r10, [src, offset + 0x08];	\
	lw	r11, [src, offset + 0x0c];	\
	ADDC(sum, r8);			\
	ADDC(sum, r9);			\
	ADDC(sum, r10);			\
	ADDC(sum, r11);			\
	lw	r8, [src, offset + 0x10];	\
	lw	r9, [src, offset + 0x14];	\
	lw	r10, [src, offset + 0x18];	\
	lw	r11, [src, offset + 0x1c];	\
	ADDC(sum, r8);			\
	ADDC(sum, r9);			\
	ADDC(sum, r10);			\
	ADDC(sum, r11);

/* Register conventions: r4 = buffer, r5 = length, r27 = accumulator. */
#define src r4
#define dest r5
#define sum r27

	.text
/* unknown src alignment and < 8 bytes to go */
small_csumcpy:
	mv	r5, r10
	ldi	r9, 0x0
	cmpi.c	r25, 0x1
	beq	pass_small_set_t7	/* already set, jump to pass_small_set_t7 */
	andri.c	r25, r4, 0x1		/* Is src 2 bytes aligned? */

pass_small_set_t7:
	beq	aligned
	cmpi.c	r5, 0x0
	beq	fold
	lbu	r9, [src]
	slli	r9, r9, 0x8		/* Little endian */
	ADDC(sum, r9)
	addi	src, 0x1
	subi.c	r5, 0x1

	/* len still a full word */
aligned:
	andri.c	r8, r5, 0x4		/* Len >= 4? */
	beq	len_less_4bytes

	/* Still a full word (4byte) to go, and the src is word aligned. */
	andri.c	r8, src, 0x3		/* src is 4bytes aligned, so use LW!! */
	beq	four_byte_aligned
	lhu	r9, [src]
	addi	src, 2
	ADDC(sum, r9)
	lhu	r9, [src]
	addi	src, 2
	ADDC(sum, r9)
	b	len_less_4bytes

four_byte_aligned:			/* Len >= 4 and four byte aligned */
	lw	r9, [src]
	addi	src, 4
	ADDC(sum, r9)

len_less_4bytes:			/* 2 byte aligned and length < 4B */
	andri.c	r8, r5, 0x2
	beq	len_less_2bytes
	lhu	r9, [src]
	addi	src, 0x2		/* src += 2 */
	ADDC(sum, r9)

len_less_2bytes:			/* len = 1 */
	andri.c	r8, r5, 0x1
	beq	fold			/* less than 2 and not equal 1 --> len=0 -> fold */
	lbu	r9, [src]

fold_ADDC:
	ADDC(sum, r9)
fold:
	/* fold checksum: collapse the 32-bit sum to 16 bits with carry */
	slli	r26, sum, 16
	add	sum, sum, r26
	cmp.c	r26, sum
	srli	sum, sum, 16
	bleu	1f			/* if r26 <= sum */
	addi	sum, 0x1		/* r26 > sum */
1:
	/* odd buffer alignment? r25 was set in csum_partial; swap bytes */
	cmpi.c	r25, 0x0
	beq	1f
	slli	r26, sum, 8
	srli	sum, sum, 8
	or	sum, sum, r26
	andi	sum, 0xffff
1:
	.set	optimize
	/* Add the passed partial csum. */
	ADDC(sum, r6)
	mv	r4, sum
	br	r3
	.set	volatile

	.align	5
/*
 * unsigned int csum_partial(const void *buf (r4), int len (r5),
 *                           unsigned int sum (r6))
 * Result returned in r4.  r25 records whether the buffer started odd.
 */
ENTRY(csum_partial)
	ldi	sum, 0
	ldi	r25, 0
	mv	r10, r5
	cmpi.c	r5, 0x8
	blt	small_csumcpy		/* < 8 (signed) bytes to copy */
	cmpi.c	r5, 0x0
	beq	out
	andri.c	r25, src, 0x1		/* odd buffer? */

	beq	word_align
hword_align:				/* 1 byte */
	lbu	r8, [src]
	subi	r5, 0x1
	slli	r8, r8, 8
	ADDC(sum, r8)
	addi	src, 0x1

word_align:				/* 2 bytes */
	andri.c	r8, src, 0x2		/* 4bytes(dword)_aligned? */
	beq	dword_align		/* not, maybe dword_align */
	lhu	r8, [src]
	subi	r5, 0x2
	ADDC(sum, r8)
	addi	src, 0x2

dword_align:				/* 4 bytes */
	mv	r26, r5			/* maybe useless when len >= 56 */
	ldi	r8, 56
	cmp.c	r8, r5
	bgtu	do_end_words		/* if a1(len) < t0(56), unsigned */
	andri.c	r26, src, 0x4
	beq	qword_align
	lw	r8, [src]
	subi	r5, 0x4
	ADDC(sum, r8)
	addi	src, 0x4

qword_align:				/* 8 bytes */
	andri.c	r26, src, 0x8
	beq	oword_align
	lw	r8, [src, 0x0]
	lw	r9, [src, 0x4]
	subi	r5, 0x8			/* len -= 0x8 */
	ADDC(sum, r8)
	ADDC(sum, r9)
	addi	src, 0x8

oword_align:				/* 16 bytes */
	andri.c	r26, src, 0x10
	beq	begin_movement
	lw	r10, [src, 0x08]
	lw	r11, [src, 0x0c]
	lw	r8, [src, 0x00]
	lw	r9, [src, 0x04]
	ADDC(sum, r10)
	ADDC(sum, r11)
	ADDC(sum, r8)
	ADDC(sum, r9)
	subi	r5, 0x10
	addi	src, 0x10

begin_movement:
	srli.c	r26, r5, 0x7		/* len >= 128? */
	beq	1f			/* len < 128 */

	/* r26 is the result that computed in oword_align */
move_128bytes:
	CSUM_BIGCHUNK(src, 0x00, sum)
	CSUM_BIGCHUNK(src, 0x20, sum)
	CSUM_BIGCHUNK(src, 0x40, sum)
	CSUM_BIGCHUNK(src, 0x60, sum)
	subi.c	r26, 0x01		/* r26 equals len/128 */
	addi	src, 0x80
	bne	move_128bytes

1:	/* len < 128, we process 64 bytes here */
	andri.c	r10, r5, 0x40
	beq	1f

move_64bytes:
	CSUM_BIGCHUNK(src, 0x00, sum)
	CSUM_BIGCHUNK(src, 0x20, sum)
	addi	src, 0x40

1:	/* len < 64 */
	andri	r26, r5, 0x1c		/* 0x1c = 28 */
	andri.c	r10, r5, 0x20
	beq	do_end_words		/* decided by andri */

move_32bytes:
	CSUM_BIGCHUNK(src, 0x00, sum)
	andri	r26, r5, 0x1c
	addri	src, src, 0x20

do_end_words:				/* len < 32 */
	/* r26 was set already in dword_align */
	cmpi.c	r26, 0x0
	beq	maybe_end_cruft		/* len < 28 or len < 56 */
	srli	r26, r26, 0x2

end_words:
	lw	r8, [src]
	subi.c	r26, 0x1		/* unit is 4 byte */
	ADDC(sum, r8)
	addi	src, 0x4
	cmpi.c	r26, 0x0
	bne	end_words		/* r26 != 0 */

maybe_end_cruft:			/* len < 4 */
	andri	r10, r5, 0x3

small_memcpy:
	mv	r5, r10
	j	small_csumcpy

out:
	mv	r4, sum
	br	r3

END(csum_partial)
+52
arch/score/lib/checksum_copy.c
/*
 * arch/score/lib/csum_partial_copy.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <net/checksum.h>

#include <asm/uaccess.h>

/*
 * Checksum 'len' bytes of 'src' into the running one's-complement sum
 * 'sum', then copy the same bytes to 'dst'.  Checksum and copy are two
 * separate passes here (no fused csum+copy loop on this architecture).
 */
unsigned int csum_partial_copy(const char *src, char *dst,
				int len, unsigned int sum)
{
	sum = csum_partial(src, len, sum);
	memcpy(dst, src, len);

	return sum;
}

/*
 * Copy 'len' bytes from user space, then checksum the kernel-side copy.
 * On a partial fault the missing tail of 'dst' is zero-filled and
 * *err_ptr is set to -EFAULT; the checksum is still computed over the
 * full (partly zeroed) 'dst' buffer, so the caller must honor *err_ptr.
 *
 * NOTE(review): prototypes here use plain char*/unsigned int rather than
 * the __user/__wsum annotated forms in <net/checksum.h> — presumably
 * intentional for this port; verify against asm/checksum.h declarations.
 */
unsigned int csum_partial_copy_from_user(const char *src, char *dst,
					int len, unsigned int sum,
					int *err_ptr)
{
	int missing;

	missing = copy_from_user(dst, src, len);
	if (missing) {
		memset(dst + len - missing, 0, missing);
		*err_ptr = -EFAULT;
	}

	return csum_partial(dst, len, sum);
}
+44
arch/score/lib/cmpdi2.c
··· 1 + /* 2 + * arch/score/lib/cmpdi2.c 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, see the file COPYING, or write 16 + * to the Free Software Foundation, Inc., 17 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 + */ 19 + 20 + #include <linux/module.h> 21 + #include "libgcc.h" 22 + 23 + word_type __cmpdi2(long long a, long long b) 24 + { 25 + const DWunion au = { 26 + .ll = a 27 + }; 28 + const DWunion bu = { 29 + .ll = b 30 + }; 31 + 32 + if (au.s.high < bu.s.high) 33 + return 0; 34 + else if (au.s.high > bu.s.high) 35 + return 2; 36 + 37 + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) 38 + return 0; 39 + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) 40 + return 2; 41 + 42 + return 1; 43 + } 44 + EXPORT_SYMBOL(__cmpdi2);
+37
arch/score/lib/libgcc.h
/*
 * arch/score/lib/libgcc.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */


#ifndef __ASM_LIBGCC_H
#define __ASM_LIBGCC_H

#include <asm/byteorder.h>

/* Machine word type as used by the libgcc runtime helpers. */
typedef int word_type __attribute__((mode(__word__)));

/*
 * Two 32-bit halves of a 64-bit value.  Member order is low-then-high,
 * i.e. a little-endian layout.
 * NOTE(review): <asm/byteorder.h> is included but the struct is not
 * conditional on endianness — presumably S+core is little-endian only;
 * confirm before reusing on another configuration.
 */
struct DWstruct {
	int low, high;
};

/* View a 64-bit long long either whole or as its two halves. */
typedef union {
	struct DWstruct s;
	long long ll;
} DWunion;

#endif /* __ASM_LIBGCC_H */
+47
arch/score/lib/lshrdi3.c
··· 1 + /* 2 + * arch/score/lib/lshrdi3.c 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, see the file COPYING, or write 16 + * to the Free Software Foundation, Inc., 17 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 + */ 19 + 20 + 21 + #include <linux/module.h> 22 + #include "libgcc.h" 23 + 24 + long long __lshrdi3(long long u, word_type b) 25 + { 26 + DWunion uu, w; 27 + word_type bm; 28 + 29 + if (b == 0) 30 + return u; 31 + 32 + uu.ll = u; 33 + bm = 32 - b; 34 + 35 + if (bm <= 0) { 36 + w.s.high = 0; 37 + w.s.low = (unsigned int) uu.s.high >> -bm; 38 + } else { 39 + const unsigned int carries = (unsigned int) uu.s.high << bm; 40 + 41 + w.s.high = (unsigned int) uu.s.high >> b; 42 + w.s.low = ((unsigned int) uu.s.low >> b) | carries; 43 + } 44 + 45 + return w.ll; 46 + } 47 + EXPORT_SYMBOL(__lshrdi3);
+184
arch/score/lib/string.S
/*
 * arch/score/lib/string.S
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/linkage.h>
#include <asm-generic/errno.h>

/*
 * User-access primitives.  Numeric labels (0:, 1:, ...) mark the
 * faultable loads/stores; the __ex_table entries pair each of them with
 * the 99: fixup so a fault on a bad user pointer branches to the fixup
 * instead of oopsing.  Arguments arrive in r4/r5/r6, result leaves in
 * r4, return address is in r3.
 */
	.text
	.align	2
/* __strncpy_from_user(dst=r4, src=r5, count=r6): r4 = copied length, or 0 / -EFAULT */
ENTRY(__strncpy_from_user)
	cmpi.c	r6, 0
	mv	r9, r6
	ble	.L2
0:	lbu	r7, [r5]
	ldi	r8, 0
1:	sb	r7, [r4]
2:	lb	r6, [r5]
	cmp.c	r6, r8
	beq	.L2

.L5:
	addi	r8, 1
	cmp.c	r8, r9
	beq	.L7
3:	lbu	r6, [r5, 1]+
4:	sb	r6, [r4, 1]+
5:	lb	r7, [r5]
	cmpi.c	r7, 0
	bne	.L5
.L7:
	mv	r4, r8
	br	r3
.L2:
	ldi	r8, 0
	mv	r4, r8
	br	r3
	.section .fixup, "ax"
99:
	ldi	r4, -EFAULT
	br	r3
	.previous
	.section __ex_table, "a"
	.align	2
	.word	0b, 99b
	.word	1b, 99b
	.word	2b, 99b
	.word	3b, 99b
	.word	4b, 99b
	.word	5b, 99b
	.previous

	.align	2
/* __strnlen_user(str=r4, maxlen=r5): r4 = strlen + 1 (incl. NUL), 0 on fault */
ENTRY(__strnlen_user)
	cmpi.c	r5, 0
	ble	.L11
0:	lb	r6, [r4]
	ldi	r7, 0
	cmp.c	r6, r7
	beq	.L11
.L15:
	addi	r7, 1
	cmp.c	r7, r5
	beq	.L23
1:	lb	r6, [r4, 1]+
	cmpi.c	r6, 0
	bne	.L15
.L23:
	addri	r4, r7, 1
	br	r3

.L11:
	ldi	r4, 1
	br	r3
	.section .fixup, "ax"
99:
	ldi	r4, 0
	br	r3

	/*
	 * NOTE(review): no .previous between the fixup above and the
	 * __ex_table below, unlike the other routines in this file —
	 * verify the fixup code is still emitted into .fixup.
	 */
	.section __ex_table, "a"
	.align	2
	.word	0b, 99b
	.word	1b, 99b
	.previous

	.align	2
/* __strlen_user(str=r4): r4 = string length, 0 on fault */
ENTRY(__strlen_user)
0:	lb	r6, [r4]
	mv	r7, r4
	extsb	r6, r6
	cmpi.c	r6, 0
	mv	r4, r6
	beq	.L27
.L28:
1:	lb	r6, [r7, 1]+
	/*
	 * NOTE(review): this increments r6 (the byte just loaded) rather
	 * than a separate length counter, and r4 was loaded with the first
	 * byte above; the loop would then stop on a 0xff byte, not NUL.
	 * Looks like it should count in r4 — confirm against hardware/ABI.
	 */
	addi	r6, 1
	cmpi.c	r6, 0
	bne	.L28
.L27:
	br	r3
	.section .fixup, "ax"
	ldi	r4, 0x0
	br	r3
99:
	ldi	r4, 0
	br	r3
	.previous
	.section __ex_table, "a"
	.align	2
	.word	0b, 99b
	.word	1b, 99b
	.previous

	.align	2
/* __copy_tofrom_user(to=r4, from=r5, len=r6): r4 = bytes NOT copied (0 on success) */
ENTRY(__copy_tofrom_user)
	cmpi.c	r6, 0
	mv	r10, r6
	beq	.L32
	ldi	r9, 0
.L34:
	add	r6, r5, r9
0:	lbu	r8, [r6]
	add	r7, r4, r9
1:	sb	r8, [r7]
	addi	r9, 1
	cmp.c	r9, r10
	bne	.L34
.L32:
	ldi	r4, 0
	br	r3
	.section .fixup, "ax"
99:
	/* Fault mid-copy: report the remaining byte count. */
	sub	r4, r10, r9
	br	r3
	.previous
	.section __ex_table, "a"
	.align	2
	.word	0b, 99b
	.word	1b, 99b
	.previous

	.align	2
/* __clear_user(addr=r4, size=r5): zero size bytes at addr; r4 = 0 */
ENTRY(__clear_user)
	cmpi.c	r5, 0
	beq	.L38
	ldi	r6, 0
	mv	r7, r6
.L40:
	addi	r6, 1
0:	sb	r7, [r4]+, 1
	cmp.c	r6, r5
	bne	.L40
.L38:
	ldi	r4, 0
	br	r3

	.section .fixup, "ax"
	br	r3
	.previous
	.section __ex_table, "a"
	.align	2
	/*
	 * NOTE(review): the 99: label sits inside __ex_table here, so the
	 * fixup address recorded for 0b points into the table itself, and
	 * the 'br r3' in .fixup above is unlabeled.  The other routines
	 * place 99: at the start of .fixup — verify and likely move it.
	 */
99:
	.word	0b, 99b
	.previous
+38
arch/score/lib/ucmpdi2.c
··· 1 + /* 2 + * arch/score/lib/ucmpdi2.c 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, see the file COPYING, or write 16 + * to the Free Software Foundation, Inc., 17 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 + */ 19 + 20 + #include <linux/module.h> 21 + #include "libgcc.h" 22 + 23 + word_type __ucmpdi2(unsigned long long a, unsigned long long b) 24 + { 25 + const DWunion au = {.ll = a}; 26 + const DWunion bu = {.ll = b}; 27 + 28 + if ((unsigned int) au.s.high < (unsigned int) bu.s.high) 29 + return 0; 30 + else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) 31 + return 2; 32 + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) 33 + return 0; 34 + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) 35 + return 2; 36 + return 1; 37 + } 38 + EXPORT_SYMBOL(__ucmpdi2);
+6
arch/score/mm/Makefile
#
# Makefile for the Linux/SCORE-specific parts of the memory manager.
#

# Always-built objects: cache maintenance, exception-table fixups,
# page-fault handling, memory init, TLB handling and page tables.
obj-y	+= cache.o extable.o fault.o init.o \
	   tlb-miss.o tlb-score.o pgtable.o
+257
arch/score/mm/cache.c
··· 1 + /* 2 + * arch/score/mm/cache.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Lennox Wu <lennox.wu@sunplusct.com> 8 + * Chen Liqin <liqin.chen@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/init.h> 27 + #include <linux/linkage.h> 28 + #include <linux/kernel.h> 29 + #include <linux/mm.h> 30 + #include <linux/module.h> 31 + #include <linux/sched.h> 32 + 33 + #include <asm/mmu_context.h> 34 + 35 + /* 36 + Just flush entire Dcache!! 37 + You must ensure the page doesn't include instructions, because 38 + the function will not flush the Icache. 39 + The addr must be cache aligned. 40 + */ 41 + static void flush_data_cache_page(unsigned long addr) 42 + { 43 + unsigned int i; 44 + for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { 45 + __asm__ __volatile__( 46 + "cache 0x0e, [%0, 0]\n" 47 + "cache 0x1a, [%0, 0]\n" 48 + "nop\n" 49 + : : "r" (addr)); 50 + addr += L1_CACHE_BYTES; 51 + } 52 + } 53 + 54 + /* called by update_mmu_cache. 
*/ 55 + void __update_cache(struct vm_area_struct *vma, unsigned long address, 56 + pte_t pte) 57 + { 58 + struct page *page; 59 + unsigned long pfn, addr; 60 + int exec = (vma->vm_flags & VM_EXEC); 61 + 62 + pfn = pte_pfn(pte); 63 + if (unlikely(!pfn_valid(pfn))) 64 + return; 65 + page = pfn_to_page(pfn); 66 + if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) { 67 + addr = (unsigned long) page_address(page); 68 + if (exec) 69 + flush_data_cache_page(addr); 70 + clear_bit(PG_arch_1, &page->flags); 71 + } 72 + } 73 + 74 + static inline void setup_protection_map(void) 75 + { 76 + protection_map[0] = PAGE_NONE; 77 + protection_map[1] = PAGE_READONLY; 78 + protection_map[2] = PAGE_COPY; 79 + protection_map[3] = PAGE_COPY; 80 + protection_map[4] = PAGE_READONLY; 81 + protection_map[5] = PAGE_READONLY; 82 + protection_map[6] = PAGE_COPY; 83 + protection_map[7] = PAGE_COPY; 84 + protection_map[8] = PAGE_NONE; 85 + protection_map[9] = PAGE_READONLY; 86 + protection_map[10] = PAGE_SHARED; 87 + protection_map[11] = PAGE_SHARED; 88 + protection_map[12] = PAGE_READONLY; 89 + protection_map[13] = PAGE_READONLY; 90 + protection_map[14] = PAGE_SHARED; 91 + protection_map[15] = PAGE_SHARED; 92 + } 93 + 94 + void __devinit cpu_cache_init(void) 95 + { 96 + setup_protection_map(); 97 + } 98 + 99 + void flush_icache_all(void) 100 + { 101 + __asm__ __volatile__( 102 + "la r8, flush_icache_all\n" 103 + "cache 0x10, [r8, 0]\n" 104 + "nop\nnop\nnop\nnop\nnop\nnop\n" 105 + : : : "r8"); 106 + } 107 + 108 + void flush_dcache_all(void) 109 + { 110 + __asm__ __volatile__( 111 + "la r8, flush_dcache_all\n" 112 + "cache 0x1f, [r8, 0]\n" 113 + "nop\nnop\nnop\nnop\nnop\nnop\n" 114 + "cache 0x1a, [r8, 0]\n" 115 + "nop\nnop\nnop\nnop\nnop\nnop\n" 116 + : : : "r8"); 117 + } 118 + 119 + void flush_cache_all(void) 120 + { 121 + __asm__ __volatile__( 122 + "la r8, flush_cache_all\n" 123 + "cache 0x10, [r8, 0]\n" 124 + "nop\nnop\nnop\nnop\nnop\nnop\n" 125 + "cache 0x1f, [r8, 0]\n" 126 + 
"nop\nnop\nnop\nnop\nnop\nnop\n" 127 + "cache 0x1a, [r8, 0]\n" 128 + "nop\nnop\nnop\nnop\nnop\nnop\n" 129 + : : : "r8"); 130 + } 131 + 132 + void flush_cache_mm(struct mm_struct *mm) 133 + { 134 + if (!(mm->context)) 135 + return; 136 + flush_cache_all(); 137 + } 138 + 139 + /*if we flush a range precisely , the processing may be very long. 140 + We must check each page in the range whether present. If the page is present, 141 + we can flush the range in the page. Be careful, the range may be cross two 142 + page, a page is present and another is not present. 143 + */ 144 + /* 145 + The interface is provided in hopes that the port can find 146 + a suitably efficient method for removing multiple page 147 + sized regions from the cache. 148 + */ 149 + void flush_cache_range(struct vm_area_struct *vma, 150 + unsigned long start, unsigned long end) 151 + { 152 + struct mm_struct *mm = vma->vm_mm; 153 + int exec = vma->vm_flags & VM_EXEC; 154 + pgd_t *pgdp; 155 + pud_t *pudp; 156 + pmd_t *pmdp; 157 + pte_t *ptep; 158 + 159 + if (!(mm->context)) 160 + return; 161 + 162 + pgdp = pgd_offset(mm, start); 163 + pudp = pud_offset(pgdp, start); 164 + pmdp = pmd_offset(pudp, start); 165 + ptep = pte_offset(pmdp, start); 166 + 167 + while (start <= end) { 168 + unsigned long tmpend; 169 + pgdp = pgd_offset(mm, start); 170 + pudp = pud_offset(pgdp, start); 171 + pmdp = pmd_offset(pudp, start); 172 + ptep = pte_offset(pmdp, start); 173 + 174 + if (!(pte_val(*ptep) & _PAGE_PRESENT)) { 175 + start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); 176 + continue; 177 + } 178 + tmpend = (start | (PAGE_SIZE-1)) > end ? 
179 + end : (start | (PAGE_SIZE-1)); 180 + 181 + flush_dcache_range(start, tmpend); 182 + if (exec) 183 + flush_icache_range(start, tmpend); 184 + start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); 185 + } 186 + } 187 + 188 + void flush_cache_page(struct vm_area_struct *vma, 189 + unsigned long addr, unsigned long pfn) 190 + { 191 + int exec = vma->vm_flags & VM_EXEC; 192 + unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT); 193 + 194 + flush_dcache_range(kaddr, kaddr + PAGE_SIZE); 195 + 196 + if (exec) 197 + flush_icache_range(kaddr, kaddr + PAGE_SIZE); 198 + } 199 + 200 + void flush_cache_sigtramp(unsigned long addr) 201 + { 202 + __asm__ __volatile__( 203 + "cache 0x02, [%0, 0]\n" 204 + "nop\nnop\nnop\nnop\nnop\n" 205 + "cache 0x02, [%0, 0x4]\n" 206 + "nop\nnop\nnop\nnop\nnop\n" 207 + 208 + "cache 0x0d, [%0, 0]\n" 209 + "nop\nnop\nnop\nnop\nnop\n" 210 + "cache 0x0d, [%0, 0x4]\n" 211 + "nop\nnop\nnop\nnop\nnop\n" 212 + 213 + "cache 0x1a, [%0, 0]\n" 214 + "nop\nnop\nnop\nnop\nnop\n" 215 + : : "r" (addr)); 216 + } 217 + 218 + /* 219 + 1. WB and invalid a cache line of Dcache 220 + 2. Drain Write Buffer 221 + the range must be smaller than PAGE_SIZE 222 + */ 223 + void flush_dcache_range(unsigned long start, unsigned long end) 224 + { 225 + int size, i; 226 + 227 + start = start & ~(L1_CACHE_BYTES - 1); 228 + end = end & ~(L1_CACHE_BYTES - 1); 229 + size = end - start; 230 + /* flush dcache to ram, and invalidate dcache lines. */ 231 + for (i = 0; i < size; i += L1_CACHE_BYTES) { 232 + __asm__ __volatile__( 233 + "cache 0x0e, [%0, 0]\n" 234 + "nop\nnop\nnop\nnop\nnop\n" 235 + "cache 0x1a, [%0, 0]\n" 236 + "nop\nnop\nnop\nnop\nnop\n" 237 + : : "r" (start)); 238 + start += L1_CACHE_BYTES; 239 + } 240 + } 241 + 242 + void flush_icache_range(unsigned long start, unsigned long end) 243 + { 244 + int size, i; 245 + start = start & ~(L1_CACHE_BYTES - 1); 246 + end = end & ~(L1_CACHE_BYTES - 1); 247 + 248 + size = end - start; 249 + /* invalidate icache lines. 
*/ 250 + for (i = 0; i < size; i += L1_CACHE_BYTES) { 251 + __asm__ __volatile__( 252 + "cache 0x02, [%0, 0]\n" 253 + "nop\nnop\nnop\nnop\nnop\n" 254 + : : "r" (start)); 255 + start += L1_CACHE_BYTES; 256 + } 257 + }
+38
arch/score/mm/extable.c
··· 1 + /* 2 + * arch/score/mm/extable.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Lennox Wu <lennox.wu@sunplusct.com> 8 + * Chen Liqin <liqin.chen@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/module.h> 27 + 28 + int fixup_exception(struct pt_regs *regs) 29 + { 30 + const struct exception_table_entry *fixup; 31 + 32 + fixup = search_exception_tables(regs->cp0_epc); 33 + if (fixup) { 34 + regs->cp0_epc = fixup->fixup; 35 + return 1; 36 + } 37 + return 0; 38 + }
+235
arch/score/mm/fault.c
/*
 * arch/score/mm/fault.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * @regs:    trap frame at the time of the fault
 * @write:   non-zero when the faulting access was a write
 * @address: the faulting virtual address
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	/* hex digits in an address, for printk field widths below */
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;
	int fault;

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand.  The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case.  We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto vmalloc_fault;
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto vmalloc_fault;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	/* Address below the vma: only valid for a growable stack. */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* Check the access type against the vma's permissions. */
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
			"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
			0, field, address, field, regs->cp0_epc,
			field, regs->regs[3]);
	die("Oops", regs);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	/* init must not be OOM-killed: yield and retry the fault. */
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 *
	 * NOTE(review): the 'else' above covers only the next statement;
	 * the rest runs unconditionally.  Harmless today because the 'if'
	 * branch is a goto, but the unbraced else is fragile — consider
	 * braces.
	 */
		tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
+159
arch/score/mm/init.c
··· 1 + /* 2 + * arch/score/mm/init.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Lennox Wu <lennox.wu@sunplusct.com> 8 + * Chen Liqin <liqin.chen@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/errno.h> 27 + #include <linux/bootmem.h> 28 + #include <linux/kernel.h> 29 + #include <linux/init.h> 30 + #include <linux/mm.h> 31 + #include <linux/mman.h> 32 + #include <linux/pagemap.h> 33 + #include <linux/proc_fs.h> 34 + #include <linux/sched.h> 35 + #include <linux/initrd.h> 36 + 37 + #include <asm/sections.h> 38 + #include <asm/tlb.h> 39 + 40 + DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 41 + 42 + unsigned long empty_zero_page; 43 + EXPORT_SYMBOL_GPL(empty_zero_page); 44 + 45 + static struct kcore_list kcore_mem, kcore_vmalloc; 46 + 47 + static unsigned long setup_zero_page(void) 48 + { 49 + struct page *page; 50 + 51 + empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0); 52 + if (!empty_zero_page) 53 + panic("Oh boy, that early out of memory?"); 54 + 55 + page = virt_to_page((void *) empty_zero_page); 56 + SetPageReserved(page); 57 + 58 + return 1UL; 59 + } 60 + 61 + #ifndef CONFIG_NEED_MULTIPLE_NODES 
62 + static int __init page_is_ram(unsigned long pagenr) 63 + { 64 + if (pagenr >= min_low_pfn && pagenr < max_low_pfn) 65 + return 1; 66 + else 67 + return 0; 68 + } 69 + 70 + void __init paging_init(void) 71 + { 72 + unsigned long max_zone_pfns[MAX_NR_ZONES]; 73 + unsigned long lastpfn; 74 + 75 + pagetable_init(); 76 + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 77 + lastpfn = max_low_pfn; 78 + free_area_init_nodes(max_zone_pfns); 79 + } 80 + 81 + void __init mem_init(void) 82 + { 83 + unsigned long codesize, reservedpages, datasize, initsize; 84 + unsigned long tmp, ram = 0; 85 + 86 + max_mapnr = max_low_pfn; 87 + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 88 + totalram_pages += free_all_bootmem(); 89 + totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */ 90 + reservedpages = 0; 91 + 92 + for (tmp = 0; tmp < max_low_pfn; tmp++) 93 + if (page_is_ram(tmp)) { 94 + ram++; 95 + if (PageReserved(pfn_to_page(tmp))) 96 + reservedpages++; 97 + } 98 + 99 + num_physpages = ram; 100 + codesize = (unsigned long) &_etext - (unsigned long) &_text; 101 + datasize = (unsigned long) &_edata - (unsigned long) &_etext; 102 + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; 103 + 104 + kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 105 + kclist_add(&kcore_vmalloc, (void *) VMALLOC_START, 106 + VMALLOC_END - VMALLOC_START); 107 + 108 + printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " 109 + "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", 110 + (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), 111 + ram << (PAGE_SHIFT-10), codesize >> 10, 112 + reservedpages << (PAGE_SHIFT-10), datasize >> 10, 113 + initsize >> 10, 114 + (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); 115 + } 116 + #endif /* !CONFIG_NEED_MULTIPLE_NODES */ 117 + 118 + static void free_init_pages(const char *what, unsigned long begin, unsigned long end) 119 + { 120 + unsigned long pfn; 121 + 122 + for (pfn = PFN_UP(begin); pfn < 
PFN_DOWN(end); pfn++) { 123 + struct page *page = pfn_to_page(pfn); 124 + void *addr = phys_to_virt(PFN_PHYS(pfn)); 125 + 126 + ClearPageReserved(page); 127 + init_page_count(page); 128 + memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); 129 + __free_page(page); 130 + totalram_pages++; 131 + } 132 + printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); 133 + } 134 + 135 + #ifdef CONFIG_BLK_DEV_INITRD 136 + void free_initrd_mem(unsigned long start, unsigned long end) 137 + { 138 + free_init_pages("initrd memory", 139 + virt_to_phys((void *) start), 140 + virt_to_phys((void *) end)); 141 + } 142 + #endif 143 + 144 + void __init_refok free_initmem(void) 145 + { 146 + free_init_pages("unused kernel memory", 147 + (unsigned long)__init_begin, (unsigned long)__init_end); 148 + } 149 + 150 + unsigned long pgd_current; 151 + 152 + #define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order))) 153 + 154 + /* 155 + * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER 156 + * are constants. So we use the variants from asm-offset.h until that gcc 157 + * will officially be retired. 158 + */ 159 + pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+60
arch/score/mm/pgtable.c
··· 1 + /* 2 + * arch/score/mm/pgtable-32.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Lennox Wu <lennox.wu@sunplusct.com> 8 + * Chen Liqin <liqin.chen@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/bootmem.h> 27 + #include <linux/init.h> 28 + #include <linux/pfn.h> 29 + #include <linux/mm.h> 30 + 31 + void pgd_init(unsigned long page) 32 + { 33 + unsigned long *p = (unsigned long *) page; 34 + int i; 35 + 36 + for (i = 0; i < USER_PTRS_PER_PGD; i += 8) { 37 + p[i + 0] = (unsigned long) invalid_pte_table; 38 + p[i + 1] = (unsigned long) invalid_pte_table; 39 + p[i + 2] = (unsigned long) invalid_pte_table; 40 + p[i + 3] = (unsigned long) invalid_pte_table; 41 + p[i + 4] = (unsigned long) invalid_pte_table; 42 + p[i + 5] = (unsigned long) invalid_pte_table; 43 + p[i + 6] = (unsigned long) invalid_pte_table; 44 + p[i + 7] = (unsigned long) invalid_pte_table; 45 + } 46 + } 47 + 48 + void __init pagetable_init(void) 49 + { 50 + unsigned long vaddr; 51 + pgd_t *pgd_base; 52 + 53 + /* Initialize the entire pgd. 
*/ 54 + pgd_init((unsigned long) swapper_pg_dir); 55 + pgd_init((unsigned long) swapper_pg_dir 56 + + sizeof(pgd_t) * USER_PTRS_PER_PGD); 57 + 58 + pgd_base = swapper_pg_dir; 59 + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; 60 + }
+199
arch/score/mm/tlb-miss.S
/*
 * arch/score/mm/tlb-miss.S
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <asm/asmmacro.h>
#include <asm/pgtable-bits.h>
#include <asm/scoreregs.h>

/*
 * Walk the two-level page table for the faulting address (bad VPN,
 * read from cr6).  After this macro runs, the pte faulted on is
 * in register PTE, a ptr into the table in which
 * the pte belongs is in PTR.
 */
	.macro	load_pte, pte, ptr
	la	\ptr, pgd_current	/* base of the active PGD */
	lw	\ptr, [\ptr, 0]
	mfcr	\pte, cr6		/* cr6 = faulting virtual address */
	srli	\pte, \pte, 22		/* PGDIR_SHIFT = 22 */
	slli	\pte, \pte, 2
	add	\ptr, \ptr, \pte
	lw	\ptr, [\ptr, 0]		/* PTR = PTE table for this PGD slot */
	mfcr	\pte, cr6
	srli	\pte, \pte, 10
	andi	\pte, 0xffc		/* PTE index, bits 11..2 */
	add	\ptr, \ptr, \pte
	lw	\pte, [\ptr, 0]		/* PTE = the faulting entry */
	.endm

/*
 * Re-read the (possibly updated) PTE at PTR and load it into cr12;
 * the five nops cover the pipeline delay before the value is usable.
 */
	.macro	pte_reload, ptr
	lw	\ptr, [\ptr, 0]
	mtcr	\ptr, cr12
	nop
	nop
	nop
	nop
	nop
	.endm

/*
 * Hand the fault to the C page-fault handler and return to the
 * interrupted context.  \write: 0 = read fault, 1 = write fault.
 */
	.macro	do_fault, write
	SAVE_ALL
	mfcr	r6, cr6			/* faulting address -> arg */
	mv	r4, r0
	ldi	r5, \write
	la	r8, do_page_fault
	brl	r8
	j	ret_from_exception
	.endm

/*
 * Branch to \label unless both status bits in mask 0x280 are set in
 * PTE; on success PTE is reloaded from memory for the caller.
 */
	.macro	pte_writable, pte, ptr, label
	andi	\pte, 0x280
	cmpi.c	\pte, 0x280
	bne	\label
	lw	\pte, [\ptr, 0]		/*reload PTE*/
	.endm

/*
 * Make PTE writable, update software status bits as well,
 * then store at PTR.
 */
	.macro	pte_makewrite, pte, ptr
	ori	\pte, 0x426
	sw	\pte, [\ptr, 0]
	.endm

	.text
/*
 * Fast TLB refill: walk the page table by hand (registers r30/r31
 * only — no stack, no SAVE_ALL) and write the entry with mtrtlb.
 * This handler is copied into the exception vector by tlb_init().
 */
ENTRY(score7_FTLB_refill_Handler)
	la	r31, pgd_current	/* get pgd pointer */
	lw	r31, [r31, 0]		/* get the address of PGD */
	mfcr	r30, cr6
	srli	r30, r30, 22		/* PGDIR_SHIFT = 22*/
	slli	r30, r30, 2
	add	r31, r31, r30
	lw	r31, [r31, 0]		/* get the address of the start address of PTE table */

	mfcr	r30, cr9
	andi	r30, 0xfff		/* equivalent to get PTE index and right shift 2 bits */
	add	r31, r31, r30
	lw	r30, [r31, 0]		/* load pte entry */
	mtcr	r30, cr12
	nop
	nop
	nop
	nop
	nop
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte				/* 6 cycles to make sure tlb entry works */

/* Like the FTLB handler, but the PTE index comes from cr6 (bad VPN). */
ENTRY(score7_KSEG_refill_Handler)
	la	r31, pgd_current	/* get pgd pointer */
	lw	r31, [r31, 0]		/* get the address of PGD */
	mfcr	r30, cr6
	srli	r30, r30, 22		/* PGDIR_SHIFT = 22 */
	slli	r30, r30, 2
	add	r31, r31, r30
	lw	r31, [r31, 0]		/* get the address of the start address of PTE table */

	mfcr	r30, cr6		/* get Bad VPN */
	srli	r30, r30, 10
	andi	r30, 0xffc		/* PTE VPN mask (bit 11~2) */

	add	r31, r31, r30
	lw	r30, [r31, 0]		/* load pte entry */
	mtcr	r30, cr12
	nop
	nop
	nop
	nop
	nop
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte				/* 6 cycles to make sure tlb entry works */

/*
 * NOTE(review): no branch to nopage_tlbl appears in this file; either
 * dead code or referenced from elsewhere — confirm before removing.
 */
nopage_tlbl:
	do_fault	0		/* Read */

ENTRY(handle_tlb_refill)
	load_pte	r30, r31
	pte_writable	r30, r31, handle_tlb_refill_nopage
	pte_makewrite	r30, r31	/* Access|Modify|Dirty|Valid */
	pte_reload	r31
	mtrtlb
	nop
	nop
	nop
	nop
	nop
	rte
handle_tlb_refill_nopage:
	do_fault	0		/* Read */

/* NOTE(review): "invaild" is a typo for "invalid", but the symbol is
 * referenced externally and cannot be renamed here. */
ENTRY(handle_tlb_invaild)
	load_pte	r30, r31
	stlb				/* find faulting entry */
	pte_writable	r30, r31, handle_tlb_invaild_nopage
	pte_makewrite	r30, r31	/* Access|Modify|Dirty|Valid */
	pte_reload	r31
	mtptlb
	nop
	nop
	nop
	nop
	nop
	rte
handle_tlb_invaild_nopage:
	do_fault	0		/* Read */

/* TLB-modified exception: upgrade a present+writable PTE in place,
 * otherwise fall through to the C write-fault path. */
ENTRY(handle_mod)
	load_pte	r30, r31
	stlb				/* find faulting entry */
	andi	r30, _PAGE_WRITE	/* Writable? */
	cmpz.c	r30
	beq	nowrite_mod
	lw	r30, [r31, 0]		/* reload into r30 */

	/* Present and writable bits set, set accessed and dirty bits. */
	pte_makewrite	r30, r31

	/* Now reload the entry into the tlb. */
	pte_reload	r31
	mtptlb
	nop
	nop
	nop
	nop
	nop
	rte

nowrite_mod:
	do_fault	1		/* Write */
+251
arch/score/mm/tlb-score.c
··· 1 + /* 2 + * arch/score/mm/tlb-score.c 3 + * 4 + * Score Processor version. 5 + * 6 + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 7 + * Lennox Wu <lennox.wu@sunplusct.com> 8 + * Chen Liqin <liqin.chen@sunplusct.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + * 15 + * This program is distributed in the hope that it will be useful, 16 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 + * GNU General Public License for more details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, see the file COPYING, or write 22 + * to the Free Software Foundation, Inc., 23 + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 24 + */ 25 + 26 + #include <linux/highmem.h> 27 + #include <linux/module.h> 28 + 29 + #include <asm/irq.h> 30 + #include <asm/mmu_context.h> 31 + #include <asm/tlb.h> 32 + 33 + #define TLBSIZE 32 34 + 35 + unsigned long asid_cache = ASID_FIRST_VERSION; 36 + EXPORT_SYMBOL(asid_cache); 37 + 38 + void local_flush_tlb_all(void) 39 + { 40 + unsigned long flags; 41 + unsigned long old_ASID; 42 + int entry; 43 + 44 + local_irq_save(flags); 45 + old_ASID = pevn_get() & ASID_MASK; 46 + pectx_set(0); /* invalid */ 47 + entry = tlblock_get(); /* skip locked entries*/ 48 + 49 + for (; entry < TLBSIZE; entry++) { 50 + tlbpt_set(entry); 51 + pevn_set(KSEG1); 52 + barrier(); 53 + tlb_write_indexed(); 54 + } 55 + pevn_set(old_ASID); 56 + local_irq_restore(flags); 57 + } 58 + 59 + /* 60 + * If mm is currently active_mm, we can't really drop it. Instead, 61 + * we will get a new one for it. 
62 + */ 63 + static inline void 64 + drop_mmu_context(struct mm_struct *mm) 65 + { 66 + unsigned long flags; 67 + 68 + local_irq_save(flags); 69 + get_new_mmu_context(mm); 70 + pevn_set(mm->context & ASID_MASK); 71 + local_irq_restore(flags); 72 + } 73 + 74 + void local_flush_tlb_mm(struct mm_struct *mm) 75 + { 76 + if (mm->context != 0) 77 + drop_mmu_context(mm); 78 + } 79 + 80 + void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 81 + unsigned long end) 82 + { 83 + struct mm_struct *mm = vma->vm_mm; 84 + unsigned long vma_mm_context = mm->context; 85 + if (mm->context != 0) { 86 + unsigned long flags; 87 + int size; 88 + 89 + local_irq_save(flags); 90 + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 91 + if (size <= TLBSIZE) { 92 + int oldpid = pevn_get() & ASID_MASK; 93 + int newpid = vma_mm_context & ASID_MASK; 94 + 95 + start &= PAGE_MASK; 96 + end += (PAGE_SIZE - 1); 97 + end &= PAGE_MASK; 98 + while (start < end) { 99 + int idx; 100 + 101 + pevn_set(start | newpid); 102 + start += PAGE_SIZE; 103 + barrier(); 104 + tlb_probe(); 105 + idx = tlbpt_get(); 106 + pectx_set(0); 107 + pevn_set(KSEG1); 108 + if (idx < 0) 109 + continue; 110 + tlb_write_indexed(); 111 + } 112 + pevn_set(oldpid); 113 + } else { 114 + /* Bigger than TLBSIZE, get new ASID directly */ 115 + get_new_mmu_context(mm); 116 + if (mm == current->active_mm) 117 + pevn_set(vma_mm_context & ASID_MASK); 118 + } 119 + local_irq_restore(flags); 120 + } 121 + } 122 + 123 + void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) 124 + { 125 + unsigned long flags; 126 + int size; 127 + 128 + local_irq_save(flags); 129 + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 130 + if (size <= TLBSIZE) { 131 + int pid = pevn_get(); 132 + 133 + start &= PAGE_MASK; 134 + end += PAGE_SIZE - 1; 135 + end &= PAGE_MASK; 136 + 137 + while (start < end) { 138 + long idx; 139 + 140 + pevn_set(start); 141 + start += PAGE_SIZE; 142 + tlb_probe(); 143 + idx = 
tlbpt_get(); 144 + if (idx < 0) 145 + continue; 146 + pectx_set(0); 147 + pevn_set(KSEG1); 148 + barrier(); 149 + tlb_write_indexed(); 150 + } 151 + pevn_set(pid); 152 + } else { 153 + local_flush_tlb_all(); 154 + } 155 + 156 + local_irq_restore(flags); 157 + } 158 + 159 + void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 160 + { 161 + if (!vma || vma->vm_mm->context != 0) { 162 + unsigned long flags; 163 + int oldpid, newpid, idx; 164 + unsigned long vma_ASID = vma->vm_mm->context; 165 + 166 + newpid = vma_ASID & ASID_MASK; 167 + page &= PAGE_MASK; 168 + local_irq_save(flags); 169 + oldpid = pevn_get() & ASID_MASK; 170 + pevn_set(page | newpid); 171 + barrier(); 172 + tlb_probe(); 173 + idx = tlbpt_get(); 174 + pectx_set(0); 175 + pevn_set(KSEG1); 176 + if (idx < 0) /* p_bit(31) - 1: miss, 0: hit*/ 177 + goto finish; 178 + barrier(); 179 + tlb_write_indexed(); 180 + finish: 181 + pevn_set(oldpid); 182 + local_irq_restore(flags); 183 + } 184 + } 185 + 186 + /* 187 + * This one is only used for pages with the global bit set so we don't care 188 + * much about the ASID. 189 + */ 190 + void local_flush_tlb_one(unsigned long page) 191 + { 192 + unsigned long flags; 193 + int oldpid, idx; 194 + 195 + local_irq_save(flags); 196 + oldpid = pevn_get(); 197 + page &= (PAGE_MASK << 1); 198 + pevn_set(page); 199 + barrier(); 200 + tlb_probe(); 201 + idx = tlbpt_get(); 202 + pectx_set(0); 203 + if (idx >= 0) { 204 + /* Make sure all entries differ. */ 205 + pevn_set(KSEG1); 206 + barrier(); 207 + tlb_write_indexed(); 208 + } 209 + pevn_set(oldpid); 210 + local_irq_restore(flags); 211 + } 212 + 213 + void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) 214 + { 215 + unsigned long flags; 216 + int idx, pid; 217 + 218 + /* 219 + * Handle debugger faulting in for debugee. 
220 + */ 221 + if (current->active_mm != vma->vm_mm) 222 + return; 223 + 224 + pid = pevn_get() & ASID_MASK; 225 + 226 + local_irq_save(flags); 227 + address &= PAGE_MASK; 228 + pevn_set(address | pid); 229 + barrier(); 230 + tlb_probe(); 231 + idx = tlbpt_get(); 232 + pectx_set(pte_val(pte)); 233 + pevn_set(address | pid); 234 + if (idx < 0) 235 + tlb_write_random(); 236 + else 237 + tlb_write_indexed(); 238 + 239 + pevn_set(pid); 240 + local_irq_restore(flags); 241 + } 242 + 243 + void __cpuinit tlb_init(void) 244 + { 245 + tlblock_set(0); 246 + local_flush_tlb_all(); 247 + memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100), 248 + &score7_FTLB_refill_Handler, 0xFC); 249 + flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100, 250 + EXCEPTION_VECTOR_BASE_ADDR + 0x1FC); 251 + }