···
 #ifdef CONFIG_ETRAX_AXISFLASHMAP
 
 #define ASSEMBLER_MACROS_ONLY
-#include <asm/arch/sv_addr_ag.h>
+#include <arch/sv_addr_ag.h>
 
 ;; The partitiontable is looked for at the first sector after the boot
 ;; sector. Sector size is 65536 bytes in all flashes we use.
···
 #include <asm/irq.h>
 #include <asm/dma.h>
 #include <asm/io.h>
-#include <asm/arch/svinto.h>
+#include <arch/svinto.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/sync_serial.h>
-#include <asm/arch/io_interface_mux.h>
+#include <arch/io_interface_mux.h>
 
 /* The receiver is a bit tricky because of the continuous stream of data. */
 /*                                                                        */
+1-1
arch/cris/arch-v10/kernel/crisksyms.c
···
 #include <linux/module.h>
 #include <asm/io.h>
-#include <asm/arch/svinto.h>
+#include <arch/svinto.h>
 
 /* Export shadow registers for the CPU I/O pins */
 EXPORT_SYMBOL(genconfig_shadow);
···
 #define ASSEMBLER_MACROS_ONLY
 /* The IO_* macros use the ## token concatenation operator, so
    -traditional must not be used when assembling this file. */
-#include <asm/arch/sv_addr_ag.h>
+#include <arch/sv_addr_ag.h>
 
 #define CRAMFS_MAGIC 0x28cd3d45
 #define RAM_INIT_MAGIC 0x56902387
···
 #include <linux/module.h>
 #include <asm/io.h>
-#include <asm/arch/cache.h>
-#include <asm/arch/hwregs/dma.h>
+#include <arch/cache.h>
+#include <arch/hwregs/dma.h>
 
 /* This file is used to workaround a cache bug, Guinness TR 106. */
···
  * -traditional must not be used when assembling this file.
  */
 #include <hwregs/reg_rdwr.h>
-#include <asm/arch/memmap.h>
+#include <arch/memmap.h>
 #include <hwregs/intr_vect.h>
 #include <hwregs/asm/mmu_defs_asm.h>
 #include <hwregs/asm/reg_map_asm.h>
-#include <asm/arch/mach/startup.inc>
+#include <mach/startup.inc>
 
 #define CRAMFS_MAGIC 0x28cd3d45
 #define JHEAD_MAGIC 0x1FF528A6
···
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/processor.h>
-#include <asm/arch/hwregs/supp_reg.h>
+#include <arch/hwregs/supp_reg.h>
 
 /*
  * Determines which bits in CCS the user has access to.
+2-2
arch/cris/arch-v32/kernel/signal.c
···
 #include <asm/processor.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
-#include <asm/arch/ptrace.h>
-#include <asm/arch/hwregs/cpu_vect.h>
+#include <arch/ptrace.h>
+#include <arch/hwregs/cpu_vect.h>
 
 extern unsigned long cris_signal_return_page;
+5-5
arch/cris/arch-v32/lib/nand_init.S
···
 ##
 ##=============================================================================
 
-#include <asm/arch/hwregs/asm/reg_map_asm.h>
-#include <asm/arch/hwregs/asm/gio_defs_asm.h>
-#include <asm/arch/hwregs/asm/pinmux_defs_asm.h>
-#include <asm/arch/hwregs/asm/bif_core_defs_asm.h>
-#include <asm/arch/hwregs/asm/config_defs_asm.h>
+#include <arch/hwregs/asm/reg_map_asm.h>
+#include <arch/hwregs/asm/gio_defs_asm.h>
+#include <arch/hwregs/asm/pinmux_defs_asm.h>
+#include <arch/hwregs/asm/bif_core_defs_asm.h>
+#include <arch/hwregs/asm/config_defs_asm.h>
 
 ;; There are 8-bit NAND flashes and 16-bit NAND flashes.
 ;; We need to treat them slightly different.
+160
arch/cris/include/arch-v10/arch/irq.h
+/*
+ * Interrupt handling assembler and defines for Linux/CRISv10
+ */
+
+#ifndef _ASM_ARCH_IRQ_H
+#define _ASM_ARCH_IRQ_H
+
+#include <arch/sv_addr_ag.h>
+
+#define NR_IRQS 32
+
+/* The first vector number used for IRQs in v10 is really 0x20 */
+/* but all the code and constants are offset to make 0 the first */
+#define FIRST_IRQ 0
+
+#define SOME_IRQ_NBR        IO_BITNR(R_VECT_MASK_RD, some)   /* 0 ? */
+#define NMI_IRQ_NBR         IO_BITNR(R_VECT_MASK_RD, nmi)    /* 1 */
+#define TIMER0_IRQ_NBR      IO_BITNR(R_VECT_MASK_RD, timer0) /* 2 */
+#define TIMER1_IRQ_NBR      IO_BITNR(R_VECT_MASK_RD, timer1) /* 3 */
+/* mio, ata, par0, scsi0 on 4 */
+/* par1, scsi1 on 5 */
+#define NETWORK_STATUS_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, network) /* 6 */
+
+#define SERIAL_IRQ_NBR      IO_BITNR(R_VECT_MASK_RD, serial) /* 8 */
+#define PA_IRQ_NBR          IO_BITNR(R_VECT_MASK_RD, pa)     /* 11 */
+/* extdma0 and extdma1 is at irq 12 and 13 and/or same as dma5 and dma6 ? */
+#define EXTDMA0_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, ext_dma0)
+#define EXTDMA1_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, ext_dma1)
+
+/* dma0-9 is irq 16..25 */
+/* 16,17: network */
+#define DMA0_TX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma0)
+#define DMA1_RX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma1)
+#define NETWORK_DMA_TX_IRQ_NBR DMA0_TX_IRQ_NBR
+#define NETWORK_DMA_RX_IRQ_NBR DMA1_RX_IRQ_NBR
+
+/* 18,19: dma2 and dma3 shared by par0, scsi0, ser2 and ata */
+#define DMA2_TX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma2)
+#define DMA3_RX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma3)
+#define SER2_DMA_TX_IRQ_NBR DMA2_TX_IRQ_NBR
+#define SER2_DMA_RX_IRQ_NBR DMA3_RX_IRQ_NBR
+
+/* 20,21: dma4 and dma5 shared by par1, scsi1, ser3 and extdma0 */
+#define DMA4_TX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma4)
+#define DMA5_RX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma5)
+#define SER3_DMA_TX_IRQ_NBR DMA4_TX_IRQ_NBR
+#define SER3_DMA_RX_IRQ_NBR DMA5_RX_IRQ_NBR
+
+/* 22,23: dma6 and dma7 shared by ser0, extdma1 and mem2mem */
+#define DMA6_TX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma6)
+#define DMA7_RX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma7)
+#define SER0_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR
+#define SER0_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR
+#define MEM2MEM_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR
+#define MEM2MEM_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR
+
+/* 24,25: dma8 and dma9 shared by ser1 and usb */
+#define DMA8_TX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma8)
+#define DMA9_RX_IRQ_NBR     IO_BITNR(R_VECT_MASK_RD, dma9)
+#define SER1_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR
+#define SER1_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR
+#define USB_DMA_TX_IRQ_NBR  DMA8_TX_IRQ_NBR
+#define USB_DMA_RX_IRQ_NBR  DMA9_RX_IRQ_NBR
+
+/* usb: controller at irq 31 + uses DMA8 and DMA9 */
+#define USB_HC_IRQ_NBR      IO_BITNR(R_VECT_MASK_RD, usb)
+
+/* our fine, global, etrax irq vector! the pointer lives in the head.S file. */
+
+typedef void (*irqvectptr)(void);
+
+struct etrax_interrupt_vector {
+	irqvectptr v[256];
+};
+
+extern struct etrax_interrupt_vector *etrax_irv;
+void set_int_vector(int n, irqvectptr addr);
+void set_break_vector(int n, irqvectptr addr);
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+/* SAVE_ALL saves registers so they match pt_regs */
+
+#define SAVE_ALL \
+	"move $irp,[$sp=$sp-16]\n\t" /* push instruction pointer and fake SBFS struct */ \
+	"push $srp\n\t"        /* push subroutine return pointer */ \
+	"push $dccr\n\t"       /* push condition codes */ \
+	"push $mof\n\t"        /* push multiply overflow reg */ \
+	"di\n\t"               /* need to disable irq's at this point */ \
+	"subq 14*4,$sp\n\t"    /* make room for r0-r13 */ \
+	"movem $r13,[$sp]\n\t" /* push the r0-r13 registers */ \
+	"push $r10\n\t"        /* push orig_r10 */ \
+	"clear.d [$sp=$sp-4]\n\t" /* frametype - this is a normal stackframe */
+
+/* BLOCK_IRQ and UNBLOCK_IRQ do the same as mask_irq and unmask_irq */
+
+#define BLOCK_IRQ(mask,nr) \
+	"move.d " #mask ",$r0\n\t" \
+	"move.d $r0,[0xb00000d8]\n\t"
+
+#define UNBLOCK_IRQ(mask) \
+	"move.d " #mask ",$r0\n\t" \
+	"move.d $r0,[0xb00000dc]\n\t"
+
+#define IRQ_NAME2(nr) nr##_interrupt(void)
+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
+#define sIRQ_NAME(nr) IRQ_NAME2(sIRQ##nr)
+#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)
+
+/* the asm IRQ handler makes sure the causing IRQ is blocked, then it calls
+ * do_IRQ (with irq disabled still). after that it unblocks and jumps to
+ * ret_from_intr (entry.S)
+ *
+ * The reason the IRQ is blocked is to allow an sti() before the handler which
+ * will acknowledge the interrupt is run.
+ */
+
+#define BUILD_IRQ(nr,mask) \
+void IRQ_NAME(nr); \
+__asm__ ( \
+	".text\n\t" \
+	"IRQ" #nr "_interrupt:\n\t" \
+	SAVE_ALL \
+	BLOCK_IRQ(mask,nr) /* this must be done to prevent irq loops when we ei later */ \
+	"moveq " #nr ",$r10\n\t" \
+	"move.d $sp,$r11\n\t" \
+	"jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \
+	UNBLOCK_IRQ(mask) \
+	"moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \
+	"jump ret_from_intr\n\t");
+
+/* This is subtle. The timer interrupt is crucial and it should not be disabled
+ * for too long. However, if it had been a normal interrupt as per BUILD_IRQ, it
+ * would have been BLOCK'ed, and then softirq's are run before we return here to
+ * UNBLOCK. If the softirq's take too much time to run, the timer irq won't run
+ * and the watchdog will kill us.
+ *
+ * Furthermore, if a lot of other irq's occur before we return here, the
+ * multiple_irq handler is run and it prioritizes the timer interrupt. However
+ * if we had BLOCK'ed it here, we would not get the multiple_irq at all.
+ *
+ * The non-blocking here is based on the knowledge that the timer interrupt is
+ * registered as a fast interrupt (IRQF_DISABLED) so that we _know_ there will
+ * not be an sti() before the timer irq handler is run to acknowledge the
+ * interrupt.
+ */
+
+#define BUILD_TIMER_IRQ(nr,mask) \
+void IRQ_NAME(nr); \
+__asm__ ( \
+	".text\n\t" \
+	"IRQ" #nr "_interrupt:\n\t" \
+	SAVE_ALL \
+	"moveq " #nr ",$r10\n\t" \
+	"move.d $sp,$r11\n\t" \
+	"jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \
+	"moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \
+	"jump ret_from_intr\n\t");
+
+#endif
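For illustration only (not part of the patch): a hypothetical instantiation of
the BUILD_IRQ machinery above, with made-up nr/mask values, showing how the
generated stub lines up with the irqvectptr table:

	/* Emits "void IRQ8_interrupt(void);" plus the asm handler stub. */
	BUILD_IRQ(8, 0x100)

	/* The stub's type matches irqvectptr, so it can be installed: */
	set_int_vector(8, IRQ8_interrupt);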
+19
arch/cris/include/arch-v32/arch/cache.h
+#ifndef _ASM_CRIS_ARCH_CACHE_H
+#define _ASM_CRIS_ARCH_CACHE_H
+
+#include <arch/hwregs/dma.h>
+
+/* A cache-line is 32 bytes. */
+#define L1_CACHE_BYTES 32
+#define L1_CACHE_SHIFT 5
+
+void flush_dma_list(dma_descr_data *descr);
+void flush_dma_descr(dma_descr_data *descr, int flush_buf);
+
+#define flush_dma_context(c) \
+	flush_dma_list(phys_to_virt((c)->saved_data));
+
+void cris_flush_cache_range(void *buf, unsigned long len);
+void cris_flush_cache(void);
+
+#endif /* _ASM_CRIS_ARCH_CACHE_H */
+166
arch/cris/include/asm/bitops.h
+/* asm/bitops.h for Linux/CRIS
+ *
+ * TODO: asm versions if speed is needed
+ *
+ * All bit operations return 0 if the bit was cleared before the
+ * operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#ifndef _CRIS_BITOPS_H
+#define _CRIS_BITOPS_H
+
+/* Currently this is unsuitable for consumption outside the kernel. */
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <arch/bitops.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <linux/compiler.h>
+
+/*
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+
+#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)
+
+/*
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+
+#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)
+
+/*
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+
+#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned int mask, retval;
+	unsigned long flags;
+	unsigned int *adr = (unsigned int *)addr;
+
+	adr += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	cris_atomic_save(addr, flags);
+	retval = (mask & *adr) != 0;
+	*adr |= mask;
+	cris_atomic_restore(addr, flags);
+	return retval;
+}
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned int mask, retval;
+	unsigned long flags;
+	unsigned int *adr = (unsigned int *)addr;
+
+	adr += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	cris_atomic_save(addr, flags);
+	retval = (mask & *adr) != 0;
+	*adr &= ~mask;
+	cris_atomic_restore(addr, flags);
+	return retval;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned int mask, retval;
+	unsigned long flags;
+	unsigned int *adr = (unsigned int *)addr;
+	adr += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	cris_atomic_save(addr, flags);
+	retval = (mask & *adr) != 0;
+	*adr ^= mask;
+	cris_atomic_restore(addr, flags);
+	return retval;
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+/*
+ * Since we define it "external", it collides with the built-in
+ * definition, which doesn't have the same semantics. We don't want to
+ * use -fno-builtin, so just hide the name ffs.
+ */
+#define ffs kernel_ffs
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
+#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
+#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
+
+#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _CRIS_BITOPS_H */
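A usage sketch (not from the patch; the function name and the choice of bit 3
are invented for illustration). test_and_set_bit() returns the bit's previous
value, which is what makes it usable as a tiny lock:

	static unsigned long claim_flags;

	static int try_claim(void)
	{
		/* nonzero return: bit 3 was already set, someone beat us */
		if (test_and_set_bit(3, &claim_flags))
			return 0;
		return 1;	/* claimed; release with clear_bit(3, &claim_flags) */
	}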
+27
arch/cris/include/asm/byteorder.h
+#ifndef _CRIS_BYTEORDER_H
+#define _CRIS_BYTEORDER_H
+
+#ifdef __GNUC__
+
+#ifdef __KERNEL__
+#include <arch/byteorder.h>
+
+/* defines are necessary because the other files detect the presence
+ * of a defined __arch_swab32, not an inline
+ */
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab16(x) ___arch__swab16(x)
+#endif /* __KERNEL__ */
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __BYTEORDER_HAS_U64__
+# define __SWAB_64_THRU_32__
+#endif
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/little_endian.h>
+
+#endif
+
+
+83
arch/cris/include/asm/checksum.h
+/* TODO: csum_tcpudp_magic could be speeded up, and csum_fold as well */
+
+#ifndef _CRIS_CHECKSUM_H
+#define _CRIS_CHECKSUM_H
+
+#include <arch/checksum.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);
+
+/*
+ * Fold a partial checksum into a word
+ */
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	return (__force __sum16)~sum;
+}
+
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					  int len, __wsum sum,
+					  int *errptr);
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ */
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	return csum_fold(csum_partial(iph, ihl * 4, 0));
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
+#endif
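A worked example of the double fold in csum_fold() above (stand-alone model
with plain integer types; the kernel version operates on __wsum/__sum16):

	#include <stdint.h>

	static uint16_t fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16); /* may itself carry into bit 16 */
		sum = (sum & 0xffff) + (sum >> 16); /* second fold absorbs that carry */
		return (uint16_t)~sum;
	}

	/* fold(0x0001fffe): 0xfffe + 0x0001 = 0xffff, unchanged by fold 2, ~ -> 0x0000
	 * fold(0xffff0001): 0x0001 + 0xffff = 0x10000, which is why fold 2 exists:
	 *                   0x0000 + 0x0001 = 0x0001, ~ -> 0xfffe */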
+27
arch/cris/include/asm/delay.h
+#ifndef _CRIS_DELAY_H
+#define _CRIS_DELAY_H
+
+/*
+ * Copyright (C) 1998-2002 Axis Communications AB
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+
+#include <arch/delay.h>
+
+/* Use only for very small delays ( < 1 msec). */
+
+extern unsigned long loops_per_usec; /* arch/cris/mm/init.c */
+
+/* May be defined by arch/delay.h. */
+#ifndef udelay
+static inline void udelay(unsigned long usecs)
+{
+	__delay(usecs * loops_per_usec);
+}
+#endif
+
+#endif /* defined(_CRIS_DELAY_H) */
+
+
+
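Usage sketch (illustration only; the register-poking helpers are invented):
udelay() is calibrated busy-waiting, so it is only appropriate for the
sub-millisecond pauses the comment above allows:

	write_ctrl_reg(dev, val);	/* hypothetical peripheral access */
	udelay(10);			/* ~10 us calibrated busy-wait */
	status = read_status_reg(dev);	/* line has had time to settle */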
+21
arch/cris/include/asm/dma.h
+/* $Id: dma.h,v 1.2 2001/05/09 12:17:42 johana Exp $ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <arch/dma.h>
+
+/* it's useless on the Etrax, but unfortunately needed by the new
+   bootmem allocator (but this should do it for this) */
+
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* _ASM_DMA_H */
+93
arch/cris/include/asm/elf.h
+#ifndef __ASMCRIS_ELF_H
+#define __ASMCRIS_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+
+#include <asm/user.h>
+
+#define R_CRIS_NONE             0
+#define R_CRIS_8                1
+#define R_CRIS_16               2
+#define R_CRIS_32               3
+#define R_CRIS_8_PCREL          4
+#define R_CRIS_16_PCREL         5
+#define R_CRIS_32_PCREL         6
+#define R_CRIS_GNU_VTINHERIT    7
+#define R_CRIS_GNU_VTENTRY      8
+#define R_CRIS_COPY             9
+#define R_CRIS_GLOB_DAT         10
+#define R_CRIS_JUMP_SLOT        11
+#define R_CRIS_RELATIVE         12
+#define R_CRIS_16_GOT           13
+#define R_CRIS_32_GOT           14
+#define R_CRIS_16_GOTPLT        15
+#define R_CRIS_32_GOTPLT        16
+#define R_CRIS_32_GOTREL        17
+#define R_CRIS_32_PLT_GOTREL    18
+#define R_CRIS_32_PLT_PCREL     19
+
+typedef unsigned long elf_greg_t;
+
+/* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is
+   thus exposed to user-space. */
+#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* A placeholder; CRIS does not have any fp regs. */
+typedef unsigned long elf_fpregset_t;
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2LSB
+#define ELF_ARCH	EM_CRIS
+
+#include <arch/elf.h>
+
+/* The master for these definitions is {binutils}/include/elf/cris.h: */
+/* User symbols in this file have a leading underscore. */
+#define EF_CRIS_UNDERSCORE		0x00000001
+
+/* This is a mask for different incompatible machine variants. */
+#define EF_CRIS_VARIANT_MASK		0x0000000e
+
+/* Variant 0; may contain v0..10 object. */
+#define EF_CRIS_VARIANT_ANY_V0_V10	0x00000000
+
+/* Variant 1; contains v32 object. */
+#define EF_CRIS_VARIANT_V32		0x00000002
+
+/* Variant 2; contains object compatible with v32 and v10. */
+#define EF_CRIS_VARIANT_COMMON_V10_V32	0x00000004
+/* End of excerpt from {binutils}/include/elf/cris.h. */
+
+#define USE_ELF_CORE_DUMP
+
+#define ELF_EXEC_PAGESIZE	8192
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader. We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this CPU supports. This could be done in user space,
+   but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization. This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+*/
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#endif
+74
arch/cris/include/asm/page.h
+#ifndef _CRIS_PAGE_H
+#define _CRIS_PAGE_H
+
+#include <arch/page.h>
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	13
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/*
+ * These are used to make use of C type-checking..
+ */
+#ifndef __ASSEMBLY__
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+#endif
+
+#define pte_val(x)	((x).pte)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+/* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */
+/* for that before indexing into the page table starting at mem_map */
+#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
+#define pfn_valid(pfn)		(((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr)
+
+/* to index into the page map. our pages all start at physical addr PAGE_OFFSET so
+ * we can let the map start there. notice that we subtract PAGE_OFFSET because
+ * we start our mem_map there - in other ports they map mem_map physically and
+ * use __pa instead. in our system both the physical and virtual address of DRAM
+ * is too high to let mem_map start at 0, so we do it this way instead (similar
+ * to arm and m68k I think)
+ */
+
+#define virt_to_page(kaddr)	(mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)
+#define virt_addr_valid(kaddr)	pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT)
+
+/* convert a page (based on mem_map and forward) to a physical address
+ * do this by figuring out the virtual address and then use __pa
+ */
+
+#define page_to_phys(page)	__pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+
+#ifndef __ASSEMBLY__
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/page.h>
+
+#endif /* _CRIS_PAGE_H */
+
+299
arch/cris/include/asm/pgtable.h
+/*
+ * CRIS pgtable.h - macros and functions to manipulate page tables.
+ */
+
+#ifndef _CRIS_PGTABLE_H
+#define _CRIS_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <asm/mmu.h>
+#endif
+#include <arch/pgtable.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * CRIS, we use that, but "fold" the mid level into the top-level page
+ * table. Since the MMU TLB is software loaded through an interrupt, it
+ * supports any page table structure, so we could have used a three-level
+ * setup, but for the amounts of memory we normally use, a two-level is
+ * probably more efficient.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the CRIS page table tree.
+ */
+#ifndef __ASSEMBLY__
+extern void paging_init(void);
+#endif
+
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+#define set_pgu(pudptr, pudval) (*(pudptr) = pudval)
+
+/* PGDIR_SHIFT determines the size of the area a second-level page table can
+ * map. It is equal to the page size times the number of PTE's that fit in
+ * a PMD page. A PTE is 4-bytes in CRIS. Hence the following number.
+ */
+#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-2))
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: we use a two-level, so
+ * we don't really have any PMD directory physically.
+ * pointers are 4 bytes so we can use the page size and
+ * divide it by 4 (shift by 2).
+ */
+#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-2))
+#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-2))
+
+/* calculate how many PGD entries a user-level program can use
+ * the first mappable virtual address is 0
+ * (TASK_SIZE is the maximum virtual address space)
+ */
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS	0
+
+/* zero page used for uninitialized stuff */
+#ifndef __ASSEMBLY__
+extern unsigned long empty_zero_page;
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#endif
+
+/* number of bits that fit into a memory pointer */
+#define BITS_PER_PTR	(8*sizeof(unsigned long))
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK	(~(sizeof(void*)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+/* 64-bit machines, beware! SRB. */
+#define SIZEOF_PTR_LOG2	2
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address) \
+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+/* to set the page-dir */
+#define SET_PAGE_DIR(tsk,pgdir)
+
+#define pte_none(x)	(!pte_val(x))
+#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,xp)	do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_none(x)	(!pmd_val(x))
+/* by removing the _PAGE_KERNEL bit from the comparison, the same pmd_bad
+ * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries.
+ */
+#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE)
+#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_write(pte_t pte)   { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte)   { return pte_val(pte) & _PAGE_MODIFIED; }
+static inline int pte_young(pte_t pte)   { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte)    { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
+	return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_WRITE;
+	if (pte_val(pte) & _PAGE_MODIFIED)
+		pte_val(pte) |= _PAGE_SILENT_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_MODIFIED;
+	if (pte_val(pte) & _PAGE_WRITE)
+		pte_val(pte) |= _PAGE_SILENT_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_ACCESSED;
+	if (pte_val(pte) & _PAGE_READ)
+	{
+		pte_val(pte) |= _PAGE_SILENT_READ;
+		if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) ==
+		    (_PAGE_WRITE | _PAGE_MODIFIED))
+			pte_val(pte) |= _PAGE_SILENT_WRITE;
+	}
+	return pte;
+}
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+/* What actually goes as arguments to the various functions is less than
+ * obvious, but a rule of thumb is that struct page's goes as struct page *,
+ * really physical DRAM addresses are unsigned long's, and DRAM "virtual"
+ * addresses (the 0xc0xxxxxx's) goes as void *'s.
+ */
+
+static inline pte_t __mk_pte(void * page, pgprot_t pgprot)
+{
+	pte_t pte;
+	/* the PTE needs a physical address */
+	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
+	return pte;
+}
+
+#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
+
+#define mk_pte_phys(physpage, pgprot) \
+({ \
+	pte_t __pte; \
+ \
+	pte_val(__pte) = (physpage) + pgprot_val(pgprot); \
+	__pte; \
+})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
+
+
+/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval
+ * __pte_page(pte_val) refers to the "virtual" DRAM interval
+ * pte_pagenr refers to the page-number counted starting from the virtual DRAM start
+ */
+
+static inline unsigned long __pte_page(pte_t pte)
+{
+	/* the PTE contains a physical address */
+	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
+}
+
+#define pte_pagenr(pte)	((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+/* permanent address of a page */
+
+#define __page_address(page)	(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define pte_page(pte)		(mem_map+pte_pagenr(pte))
+
+/* only the pte's themselves need to point to physical DRAM (see above)
+ * the pagetable links are purely handled within the kernel SW and thus
+ * don't need the __pa and __va transformations.
+ */
+
+static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
+{ pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; }
+
+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+/* to find an entry in a page-table-directory */
+static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)
+{
+	return mm->pgd + pgd_index(address);
+}
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Find an entry in the third-level page table.. */
+#define __pte_offset(address) \
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
+#define pte_offset_map(dir, address) \
+	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#define pte_pfn(x)		((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pte_ERROR(e) \
+	printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
+#define pgd_ERROR(e) \
+	printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
+
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
+
+/*
+ * CRIS doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Actually I am not sure on what this could be used for.
+ */
+static inline void update_mmu_cache(struct vm_area_struct * vma,
+	unsigned long address, pte_t pte)
+{
+}
+
+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */
+
+#define __swp_type(x)			(((x).val >> 5) & 0x7f)
+#define __swp_offset(x)			((x).val >> 12)
+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
+
+#define kern_addr_valid(addr)   (1)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+
+#define pte_to_pgoff(x)	(pte_val(x) >> 6)
+#define pgoff_to_pte(x)	__pte(((x) << 6) | _PAGE_FILE)
+
+typedef pte_t *pte_addr_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* _CRIS_PGTABLE_H */
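A quick consistency check of the constants above (illustration only):
PAGE_SHIFT = 13 gives 8 KB pages, so PGDIR_SHIFT = 13 + (13 - 2) = 24 and each
pgd entry spans PGDIR_SIZE = 1 << 24 = 16 MB; meanwhile one page of 4-byte
PTEs holds PTRS_PER_PTE = 1 << 11 = 2048 entries mapping 2048 * 8 KB = 16 MB.
The two quantities agree, which is the invariant the "folded" two-level layout
depends on: exactly one page of PTEs per pgd slot.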
+75
arch/cris/include/asm/processor.h
+/*
+ * include/asm-cris/processor.h
+ *
+ * Copyright (C) 2000, 2001 Axis Communications AB
+ *
+ * Authors:         Bjorn Wesen        Initial version
+ *
+ */
+
+#ifndef __ASM_CRIS_PROCESSOR_H
+#define __ASM_CRIS_PROCESSOR_H
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <arch/processor.h>
+
+struct task_struct;
+
+#define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
+
+/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.
+ * normally, the stack is found by doing something like p + THREAD_SIZE
+ * in CRIS, a page is 8192 bytes, which seems like a sane size
+ */
+
+#define THREAD_SIZE	PAGE_SIZE
+#define KERNEL_STACK_SIZE	PAGE_SIZE
+
+/*
+ * At user->kernel entry, the pt_regs struct is stacked on the top of the
+ * kernel-stack. This macro allows us to find those regs for a task.
+ * Notice that subsequent pt_regs stackings, like recursive interrupts
+ * occurring while we're in the kernel, won't affect this - only the first
+ * user->kernel transition registers are reached by this.
+ */
+
+#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1)
+
+/*
+ * Ditto but for the currently running task
+ */
+
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() task_pt_regs(current)
+
+static inline void prepare_to_copy(struct task_struct *tsk)
+{
+}
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+	/* Nothing needs to be done. */
+}
+
+#define init_stack	(init_thread_union.stack)
+
+#define cpu_relax()	barrier()
+
+#endif /* __ASM_CRIS_PROCESSOR_H */
+16
arch/cris/include/asm/ptrace.h
+#ifndef _CRIS_PTRACE_H
+#define _CRIS_PTRACE_H
+
+#include <arch/ptrace.h>
+
+#ifdef __KERNEL__
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS            12
+#define PTRACE_SETREGS            13
+
+#define profile_pc(regs) instruction_pointer(regs)
+
+#endif /* __KERNEL__ */
+
+#endif /* _CRIS_PTRACE_H */
+88
arch/cris/include/asm/system.h
+#ifndef __ASM_CRIS_SYSTEM_H
+#define __ASM_CRIS_SYSTEM_H
+
+#include <arch/system.h>
+
+/* the switch_to macro calls resume, an asm function in entry.S which does the actual
+ * task switching.
+ */
+
+extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);
+#define switch_to(prev,next,last) last = resume(prev,next, \
+					 (int)&((struct task_struct *)0)->thread)
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+#define mb() barrier()
+#define rmb() mb()
+#define wmb() mb()
+#define read_barrier_depends() do { } while(0)
+#define set_mb(var, value)  do { var = value; mb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()        mb()
+#define smp_rmb()       rmb()
+#define smp_wmb()       wmb()
+#define smp_read_barrier_depends()     read_barrier_depends()
+#else
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#define smp_read_barrier_depends()     do { } while(0)
+#endif
+
+#define iret()
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+	/* since Etrax doesn't have any atomic xchg instructions, we need to disable
+	   irq's (if enabled) and do it with move.d's */
+	unsigned long flags,temp;
+	local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */
+	switch (size) {
+	case 1:
+		*((unsigned char *)&temp) = x;
+		x = *(unsigned char *)ptr;
+		*(unsigned char *)ptr = *((unsigned char *)&temp);
+		break;
+	case 2:
+		*((unsigned short *)&temp) = x;
+		x = *(unsigned short *)ptr;
+		*(unsigned short *)ptr = *((unsigned short *)&temp);
+		break;
+	case 4:
+		temp = x;
+		x = *(unsigned long *)ptr;
+		*(unsigned long *)ptr = temp;
+		break;
+	}
+	local_irq_restore(flags); /* restore irq enable bit */
+	return x;
+}
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
+#define arch_align_stack(x) (x)
+
+void default_idle(void);
+
+#endif
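Usage sketch for the interrupt-disabling exchange above (the variable and
function are invented; in-tree code would normally go through an xchg()
wrapper macro rather than call __xchg() directly):

	static volatile unsigned long pending;

	static unsigned long grab_pending(void)
	{
		/* irqs are off inside __xchg(), so the read-modify-write
		 * cannot be interrupted on this UP machine */
		return __xchg(0UL, (void *)&pending, sizeof(pending));
	}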
+106
arch/cris/include/asm/thread_info.h
+/* thread_info.h: CRIS low-level thread information
+ *
+ * Copyright (C) 2002  David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ *
+ * CRIS port by Axis Communications
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/processor.h>
+#include <arch/thread_info.h>
+#include <asm/segment.h>
+#endif
+
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must also be changed
+ */
+#ifndef __ASSEMBLY__
+struct thread_info {
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	unsigned long		flags;		/* low level flags */
+	__u32			cpu;		/* current CPU */
+	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
+	__u32			tls;		/* TLS for this thread */
+
+	mm_segment_t		addr_limit;	/* thread address space:
+						   0-0xBFFFFFFF for user-thread
+						   0-0xFFFFFFFF for kernel-thread
+						*/
+	struct restart_block	restart_block;
+	__u8			supervisor_stack[0];
+};
+
+#endif
+
+#define PREEMPT_ACTIVE		0x10000000
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+#define INIT_THREAD_INFO(tsk)				\
+{							\
+	.task		= &tsk,				\
+	.exec_domain	= &default_exec_domain,		\
+	.flags		= 0,				\
+	.cpu		= 0,				\
+	.preempt_count	= 1,				\
+	.addr_limit	= KERNEL_DS,			\
+	.restart_block = {				\
+		       .fn = do_no_restart_syscall,	\
+	},						\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+
+/* thread information allocation */
+#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
+#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE		17
+#define TIF_FREEZE		18	/* is freezing for suspend */
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+#define _TIF_FREEZE		(1<<TIF_FREEZE)
+
+#define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
+#define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
+24
arch/cris/include/asm/timex.h
+/*
+ * linux/include/asm-cris/timex.h
+ *
+ * CRIS architecture timex specifications
+ */
+
+#ifndef _ASM_CRIS_TIMEX_H
+#define _ASM_CRIS_TIMEX_H
+
+#include <arch/timex.h>
+
+/*
+ * We don't have a cycle-counter.. but we do not support SMP anyway where
+ * this is used so it does not matter.
+ */
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+	return 0;
+}
+
+#endif
+19
arch/cris/include/asm/tlb.h
+#ifndef _CRIS_TLB_H
+#define _CRIS_TLB_H
+
+#include <linux/pagemap.h>
+
+#include <arch/tlb.h>
+
+/*
+ * cris doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+#include <asm-generic/tlb.h>
+
+#endif
+404
arch/cris/include/asm/uaccess.h
+/*
+ * Authors:    Bjorn Wesen (bjornw@axis.com)
+ *	       Hans-Peter Nilsson (hp@axis.com)
+ */
+
+/* Asm:s have been tweaked (within the domain of correctness) to give
+   satisfactory results for "gcc version 2.96 20000427 (experimental)".
+
+   Check regularly...
+
+   Register $r9 is chosen for temporaries, being a call-clobbered register
+   first in line to be used (notably for local blocks), not colliding with
+   parameter registers.  */
+
+#ifndef _CRIS_UACCESS_H
+#define _CRIS_UACCESS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+#define VERIFY_READ	0
+#define VERIFY_WRITE	1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+/* addr_limit is the maximum accessible address for the task. we misuse
+ * the KERNEL_DS and USER_DS values to both assign and compare the
+ * addr_limit values through the equally misnamed get/set_fs macros.
+ * (see above)
+ */
+
+#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a,b)	((a).seg == (b).seg)
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
+#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+
+#include <arch/uaccess.h>
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+	unsigned long insn, fixup;
+};
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on
+ * CRIS, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+#define get_user(x,ptr) \
+	__get_user_check((x),(ptr),sizeof(*(ptr)))
+#define put_user(x,ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+#define __get_user(x,ptr) \
+	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x,ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+
+extern long __put_user_bad(void);
+
+#define __put_user_size(x,ptr,size,retval)			\
+do {								\
+	retval = 0;						\
+	switch (size) {						\
+	  case 1: __put_user_asm(x,ptr,retval,"move.b"); break;	\
+	  case 2: __put_user_asm(x,ptr,retval,"move.w"); break;	\
+	  case 4: __put_user_asm(x,ptr,retval,"move.d"); break;	\
+	  case 8: __put_user_asm_64(x,ptr,retval); break;	\
+	  default: __put_user_bad();				\
+	}							\
+} while (0)
+
+#define __get_user_size(x,ptr,size,retval)			\
+do {								\
+	retval = 0;						\
+	switch (size) {						\
+	  case 1: __get_user_asm(x,ptr,retval,"move.b"); break;	\
+	  case 2: __get_user_asm(x,ptr,retval,"move.w"); break;	\
+	  case 4: __get_user_asm(x,ptr,retval,"move.d"); break;	\
+	  case 8: __get_user_asm_64(x,ptr,retval); break;	\
+	  default: (x) = __get_user_bad();			\
+	}							\
+} while (0)
+
+#define __put_user_nocheck(x,ptr,size)			\
+({							\
+	long __pu_err;					\
+	__put_user_size((x),(ptr),(size),__pu_err);	\
+	__pu_err;					\
+})
+
+#define __put_user_check(x,ptr,size)				\
+({								\
+	long __pu_err = -EFAULT;				\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
+	if (access_ok(VERIFY_WRITE,__pu_addr,size))		\
+		__put_user_size((x),__pu_addr,(size),__pu_err);	\
+	__pu_err;						\
+})
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct *)(x))
+
+
+
+#define __get_user_nocheck(x,ptr,size)				\
+({								\
+	long __gu_err, __gu_val;				\
+	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+#define __get_user_check(x,ptr,size)					\
+({									\
+	long __gu_err = -EFAULT, __gu_val = 0;				\
+	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
+	if (access_ok(VERIFY_READ,__gu_addr,size))			\
+		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;				\
+	__gu_err;							\
+})
+
+extern long __get_user_bad(void);
+
+/* More complex functions.  Most are inline, but some call functions that
+   live in lib/usercopy.c  */
+
+extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
+extern unsigned long __do_clear_user(void __user *to, unsigned long n);
+
+static inline unsigned long
+__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __copy_user(to,from,n);
+	return n;
+}
+
+static inline unsigned long
+__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_user_zeroing(to,from,n);
+	return n;
+}
+
+static inline unsigned long
+__generic_clear_user(void __user *to, unsigned long n)
+{
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __do_clear_user(to,n);
+	return n;
+}
+
+static inline long
+__strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	return __do_strncpy_from_user(dst, src, count);
+}
+
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	long res = -EFAULT;
+	if (access_ok(VERIFY_READ, src, 1))
+		res = __do_strncpy_from_user(dst, src, count);
+	return res;
+}
+
+
+/* Note that these expand awfully if made into switch constructs, so
+   don't do that.  */
+
+static inline unsigned long
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long ret = 0;
+	if (n == 0)
+		;
+	else if (n == 1)
+		__asm_copy_from_user_1(to, from, ret);
+	else if (n == 2)
+		__asm_copy_from_user_2(to, from, ret);
+	else if (n == 3)
+		__asm_copy_from_user_3(to, from, ret);
+	else if (n == 4)
+		__asm_copy_from_user_4(to, from, ret);
+	else if (n == 5)
+		__asm_copy_from_user_5(to, from, ret);
+	else if (n == 6)
+		__asm_copy_from_user_6(to, from, ret);
+	else if (n == 7)
+		__asm_copy_from_user_7(to, from, ret);
+	else if (n == 8)
+		__asm_copy_from_user_8(to, from, ret);
+	else if (n == 9)
+		__asm_copy_from_user_9(to, from, ret);
+	else if (n == 10)
+		__asm_copy_from_user_10(to, from, ret);
+	else if (n == 11)
+		__asm_copy_from_user_11(to, from, ret);
+	else if (n == 12)
+		__asm_copy_from_user_12(to, from, ret);
+	else if (n == 13)
+		__asm_copy_from_user_13(to, from, ret);
+	else if (n == 14)
+		__asm_copy_from_user_14(to, from, ret);
+	else if (n == 15)
+		__asm_copy_from_user_15(to, from, ret);
+	else if (n == 16)
+		__asm_copy_from_user_16(to, from, ret);
+	else if (n == 20)
+		__asm_copy_from_user_20(to, from, ret);
+	else if (n == 24)
+		__asm_copy_from_user_24(to, from, ret);
+	else
+		ret = __generic_copy_from_user(to, from, n);
+
+	return ret;
+}
+
+/* Ditto, don't make a switch out of this.  */
+
+static inline unsigned long
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned long ret = 0;
+	if (n == 0)
+		;
+	else if (n == 1)
+		__asm_copy_to_user_1(to, from, ret);
+	else if (n == 2)
+		__asm_copy_to_user_2(to, from, ret);
+	else if (n == 3)
+		__asm_copy_to_user_3(to, from, ret);
+	else if (n == 4)
+		__asm_copy_to_user_4(to, from, ret);
+	else if (n == 5)
+		__asm_copy_to_user_5(to, from, ret);
+	else if (n == 6)
+		__asm_copy_to_user_6(to, from, ret);
+	else if (n == 7)
+		__asm_copy_to_user_7(to, from, ret);
+	else if (n == 8)
+		__asm_copy_to_user_8(to, from, ret);
+	else if (n == 9)
+		__asm_copy_to_user_9(to, from, ret);
+	else if (n == 10)
+		__asm_copy_to_user_10(to, from, ret);
+	else if (n == 11)
+		__asm_copy_to_user_11(to, from, ret);
+	else if (n == 12)
+		__asm_copy_to_user_12(to, from, ret);
+	else if (n == 13)
+		__asm_copy_to_user_13(to, from, ret);
+	else if (n == 14)
+		__asm_copy_to_user_14(to, from, ret);
+	else if (n == 15)
+		__asm_copy_to_user_15(to, from, ret);
+	else if (n == 16)
+		__asm_copy_to_user_16(to, from, ret);
+	else if (n == 20)
+		__asm_copy_to_user_20(to, from, ret);
+	else if (n == 24)
+		__asm_copy_to_user_24(to, from, ret);
+	else
+		ret = __generic_copy_to_user(to, from, n);
+
+	return ret;
+}
+
+/* No switch, please.  */
+
+static inline unsigned long
+__constant_clear_user(void __user *to, unsigned long n)
+{
+	unsigned long ret = 0;
+	if (n == 0)
+		;
+	else if (n == 1)
+		__asm_clear_1(to, ret);
+	else if (n == 2)
+		__asm_clear_2(to, ret);
+	else if (n == 3)
+		__asm_clear_3(to, ret);
+	else if (n == 4)
+		__asm_clear_4(to, ret);
+	else if (n == 8)
+		__asm_clear_8(to, ret);
+	else if (n == 12)
+		__asm_clear_12(to, ret);
+	else if (n == 16)
+		__asm_clear_16(to, ret);
+	else if (n == 20)
+		__asm_clear_20(to, ret);
+	else if (n == 24)
+		__asm_clear_24(to, ret);
+	else
+		ret = __generic_clear_user(to, n);
+
+	return ret;
+}
+
+
+#define clear_user(to, n)			\
+(__builtin_constant_p(n) ?			\
+ __constant_clear_user(to, n) :			\
+ __generic_clear_user(to, n))
+
+#define copy_from_user(to, from, n)		\
+(__builtin_constant_p(n) ?			\
+ __constant_copy_from_user(to, from, n) :	\
+ __generic_copy_from_user(to, from, n))
+
+#define copy_to_user(to, from, n)		\
+(__builtin_constant_p(n) ?			\
+ __constant_copy_to_user(to, from, n) :		\
+ __generic_copy_to_user(to, from, n))
+
+/* We let the __ versions of copy_from/to_user inline, because they're often
+ * used in fast paths and have only a small space overhead.
+ */
+
+static inline unsigned long
+__generic_copy_from_user_nocheck(void *to, const void __user *from,
+				 unsigned long n)
+{
+	return __copy_user_zeroing(to,from,n);
+}
+
+static inline unsigned long
+__generic_copy_to_user_nocheck(void __user *to, const void *from,
+			       unsigned long n)
+{
+	return __copy_user(to,from,n);
+}
+
+static inline unsigned long
+__generic_clear_user_nocheck(void __user *to, unsigned long n)
+{
+	return __do_clear_user(to,n);
+}
+
+/* without checking */
+
+#define __copy_to_user(to,from,n)   __generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))
+
+#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
+
+#endif  /* __ASSEMBLY__ */
+
+#endif	/* _CRIS_UACCESS_H */
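The canonical calling pattern for the copy_to/from_user() macros defined above
(driver-side sketch; the function and buffer are invented): the return value is
the number of bytes left uncopied, so nonzero means a fault occurred:

	static int example_write(const char __user *ubuf, size_t len)
	{
		char kbuf[64];

		if (len > sizeof(kbuf))
			return -EINVAL;
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;	/* some bytes were not copyable */
		/* ... consume kbuf ... */
		return 0;
	}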
···1+#ifndef __ASM_CRIS_USER_H2+#define __ASM_CRIS_USER_H3+4+#include <linux/types.h>5+#include <asm/ptrace.h>6+#include <asm/page.h>7+#include <arch/user.h>8+9+/*10+ * Core file format: The core file is written in such a way that gdb11+ * can understand it and provide useful information to the user (under12+ * linux we use the `trad-core' bfd). The file contents are as follows:13+ *14+ * upage: 1 page consisting of a user struct that tells gdb15+ * what is present in the file. Directly after this is a16+ * copy of the task_struct, which is currently not used by gdb,17+ * but it may come in handy at some point. All of the registers18+ * are stored as part of the upage. The upage should always be19+ * only one page long.20+ * data: The data segment follows next. We use current->end_text to21+ * current->brk to pick up all of the user variables, plus any memory22+ * that may have been sbrk'ed. No attempt is made to determine if a23+ * page is demand-zero or if a page is totally unused, we just cover24+ * the entire range. All of the addresses are rounded in such a way25+ * that an integral number of pages is written.26+ * stack: We need the stack information in order to get a meaningful27+ * backtrace. We need to write the data from usp to28+ * current->start_stack, so we round each of these in order to be able29+ * to write an integer number of pages.30+ */31+32+struct user {33+ struct user_regs_struct regs; /* entire machine state */34+ size_t u_tsize; /* text size (pages) */35+ size_t u_dsize; /* data size (pages) */36+ size_t u_ssize; /* stack size (pages) */37+ unsigned long start_code; /* text starting address */38+ unsigned long start_data; /* data starting address */39+ unsigned long start_stack; /* stack starting address */40+ long int signal; /* signal causing core dump */41+ unsigned long u_ar0; /* help gdb find registers */42+ unsigned long magic; /* identifies a core file */43+ char u_comm[32]; /* user command name */44+};45+46+#define NBPG PAGE_SIZE47+#define UPAGES 148+#define HOST_TEXT_START_ADDR (u.start_code)49+#define HOST_DATA_START_ADDR (u.start_data)50+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)51+52+#endif /* __ASM_CRIS_USER_H */
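The layout comment above implies simple page arithmetic for anything that consumes such a core file; a hypothetical reader-side sketch (helper names invented for illustration):

/* Offsets inside the dump, per the layout above: one upage,
 * then u_dsize data pages, then u_ssize stack pages. */
static unsigned long core_data_offset(void)
{
	return UPAGES * NBPG;			/* data follows the upage */
}

static unsigned long core_stack_offset(const struct user *u)
{
	return (UPAGES + u->u_dsize) * NBPG;	/* stack follows the data */
}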
···34#include <asm/system.h>35#include <linux/delay.h>3637-#include <asm/arch/svinto.h>3839/* non-arch dependent serial structures are in linux/serial.h */40#include <linux/serial.h>41/* while we keep our own stuff (struct e100_serial) in a local .h file */42#include "crisv10.h"43#include <asm/fasttimer.h>44-#include <asm/arch/io_interface_mux.h>4546#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER47#ifndef CONFIG_ETRAX_FAST_TIMER
···34#include <asm/system.h>35#include <linux/delay.h>3637+#include <arch/svinto.h>3839/* non-arch dependent serial structures are in linux/serial.h */40#include <linux/serial.h>41/* while we keep our own stuff (struct e100_serial) in a local .h file */42#include "crisv10.h"43#include <asm/fasttimer.h>44+#include <arch/io_interface_mux.h>4546#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER47#ifndef CONFIG_ETRAX_FAST_TIMER
+1-1
drivers/serial/crisv10.h
···10#include <linux/circ_buf.h>11#include <asm/termios.h>12#include <asm/dma.h>13-#include <asm/arch/io_interface_mux.h>1415/* Software state per channel */16
···10#include <linux/circ_buf.h>11#include <asm/termios.h>12#include <asm/dma.h>13+#include <arch/io_interface_mux.h>1415/* Software state per channel */16
···1-/*2- * Interrupt handling assembler and defines for Linux/CRISv103- */4-5-#ifndef _ASM_ARCH_IRQ_H6-#define _ASM_ARCH_IRQ_H7-8-#include <asm/arch/sv_addr_ag.h>9-10-#define NR_IRQS 3211-12-/* The first vector number used for IRQs in v10 is really 0x20 */13-/* but all the code and constants are offset to make 0 the first */14-#define FIRST_IRQ 015-16-#define SOME_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, some) /* 0 ? */17-#define NMI_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, nmi) /* 1 */18-#define TIMER0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer0) /* 2 */19-#define TIMER1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer1) /* 3 */20-/* mio, ata, par0, scsi0 on 4 */21-/* par1, scsi1 on 5 */22-#define NETWORK_STATUS_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, network) /* 6 */23-24-#define SERIAL_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, serial) /* 8 */25-#define PA_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, pa) /* 11 */26-/* extdma0 and extdma1 are at irq 12 and 13 and/or same as dma5 and dma6 ? */27-#define EXTDMA0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma0)28-#define EXTDMA1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma1)29-30-/* dma0-9 are irqs 16..25 */31-/* 16,17: network */32-#define DMA0_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma0)33-#define DMA1_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma1)34-#define NETWORK_DMA_TX_IRQ_NBR DMA0_TX_IRQ_NBR35-#define NETWORK_DMA_RX_IRQ_NBR DMA1_RX_IRQ_NBR36-37-/* 18,19: dma2 and dma3 shared by par0, scsi0, ser2 and ata */38-#define DMA2_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma2)39-#define DMA3_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma3)40-#define SER2_DMA_TX_IRQ_NBR DMA2_TX_IRQ_NBR41-#define SER2_DMA_RX_IRQ_NBR DMA3_RX_IRQ_NBR42-43-/* 20,21: dma4 and dma5 shared by par1, scsi1, ser3 and extdma0 */44-#define DMA4_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma4)45-#define DMA5_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma5)46-#define SER3_DMA_TX_IRQ_NBR DMA4_TX_IRQ_NBR47-#define SER3_DMA_RX_IRQ_NBR DMA5_RX_IRQ_NBR48-49-/* 22,23: dma6 and dma7 shared by ser0, extdma1 and mem2mem */50-#define DMA6_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma6)51-#define DMA7_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma7)52-#define SER0_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR53-#define SER0_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR54-#define MEM2MEM_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR55-#define MEM2MEM_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR56-57-/* 24,25: dma8 and dma9 shared by ser1 and usb */58-#define DMA8_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma8)59-#define DMA9_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma9)60-#define SER1_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR61-#define SER1_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR62-#define USB_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR63-#define USB_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR64-65-/* usb: controller at irq 31 + uses DMA8 and DMA9 */66-#define USB_HC_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, usb)67-68-/* our fine, global, etrax irq vector! the pointer lives in the head.S file. 
*/69-70-typedef void (*irqvectptr)(void);71-72-struct etrax_interrupt_vector {73- irqvectptr v[256];74-};75-76-extern struct etrax_interrupt_vector *etrax_irv;77-void set_int_vector(int n, irqvectptr addr);78-void set_break_vector(int n, irqvectptr addr);79-80-#define __STR(x) #x81-#define STR(x) __STR(x)82-83-/* SAVE_ALL saves registers so they match pt_regs */84-85-#define SAVE_ALL \86- "move $irp,[$sp=$sp-16]\n\t" /* push instruction pointer and fake SBFS struct */ \87- "push $srp\n\t" /* push subroutine return pointer */ \88- "push $dccr\n\t" /* push condition codes */ \89- "push $mof\n\t" /* push multiply overflow reg */ \90- "di\n\t" /* need to disable irq's at this point */\91- "subq 14*4,$sp\n\t" /* make room for r0-r13 */ \92- "movem $r13,[$sp]\n\t" /* push the r0-r13 registers */ \93- "push $r10\n\t" /* push orig_r10 */ \94- "clear.d [$sp=$sp-4]\n\t" /* frametype - this is a normal stackframe */95-96- /* BLOCK_IRQ and UNBLOCK_IRQ do the same as mask_irq and unmask_irq */97-98-#define BLOCK_IRQ(mask,nr) \99- "move.d " #mask ",$r0\n\t" \100- "move.d $r0,[0xb00000d8]\n\t" 101-102-#define UNBLOCK_IRQ(mask) \103- "move.d " #mask ",$r0\n\t" \104- "move.d $r0,[0xb00000dc]\n\t" 105-106-#define IRQ_NAME2(nr) nr##_interrupt(void)107-#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)108-#define sIRQ_NAME(nr) IRQ_NAME2(sIRQ##nr)109-#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)110-111- /* the asm IRQ handler makes sure the causing IRQ is blocked, then it calls112- * do_IRQ (with irq disabled still). after that it unblocks and jumps to113- * ret_from_intr (entry.S)114- *115- * The reason the IRQ is blocked is to allow an sti() before the handler that116- * acknowledges the interrupt is run.117- */118-119-#define BUILD_IRQ(nr,mask) \120-void IRQ_NAME(nr); \121-__asm__ ( \122- ".text\n\t" \123- "IRQ" #nr "_interrupt:\n\t" \124- SAVE_ALL \125- BLOCK_IRQ(mask,nr) /* this must be done to prevent irq loops when we ei later */ \126- "moveq "#nr",$r10\n\t" \127- "move.d $sp,$r11\n\t" \128- "jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \129- UNBLOCK_IRQ(mask) \130- "moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \131- "jump ret_from_intr\n\t");132-133-/* This is subtle. The timer interrupt is crucial and it should not be disabled for 134- * too long. However, if it had been a normal interrupt as per BUILD_IRQ, it would135- * have been BLOCK'ed, and then softirq's are run before we return here to UNBLOCK.136- * If the softirq's take too much time to run, the timer irq won't run and the 137- * watchdog will kill us.138- *139- * Furthermore, if a lot of other irq's occur before we return here, the multiple_irq140- * handler is run and it prioritizes the timer interrupt. However if we had BLOCK'ed141- * it here, we would not get the multiple_irq at all.142- *143- * The non-blocking here is based on the knowledge that the timer interrupt is 144- * registered as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not145- * be an sti() before the timer irq handler is run to acknowledge the interrupt.146- */147-148-#define BUILD_TIMER_IRQ(nr,mask) \149-void IRQ_NAME(nr); \150-__asm__ ( \151- ".text\n\t" \152- "IRQ" #nr "_interrupt:\n\t" \153- SAVE_ALL \154- "moveq "#nr",$r10\n\t" \155- "move.d $sp,$r11\n\t" \156- "jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \157- "moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \158- "jump ret_from_intr\n\t");159-160-#endif
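Since BUILD_IRQ() above is all token pasting, it may help to see one instantiation written out by hand. The following is an illustrative expansion of BUILD_IRQ(8, 0x100), not code from the patch; the registers at 0xb00000d8/0xb00000dc are the mask addresses the BLOCK_IRQ/UNBLOCK_IRQ macros above write to.

/* Hand expansion of BUILD_IRQ(8, 0x100), for illustration only:
 * save a pt_regs frame, mask IRQ 8 so a later ei cannot re-enter
 * it, call do_IRQ(8, regs), unmask, and leave via ret_from_intr. */
void IRQ8_interrupt(void);
__asm__ (
	".text\n\t"
	"IRQ8_interrupt:\n\t"
	SAVE_ALL
	BLOCK_IRQ(0x100, 8)
	"moveq 8,$r10\n\t"	/* arg 1: irq number */
	"move.d $sp,$r11\n\t"	/* arg 2: struct pt_regs * */
	"jsr do_IRQ\n\t"
	UNBLOCK_IRQ(0x100)
	"moveq 0,$r9\n\t"	/* tell ret_from_intr: irq, not syscall */
	"jump ret_from_intr\n\t");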
···1-/* asm/bitops.h for Linux/CRIS2- *3- * TODO: asm versions if speed is needed4- *5- * All bit operations return 0 if the bit was cleared before the6- * operation and != 0 if it was not.7- *8- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).9- */10-11-#ifndef _CRIS_BITOPS_H12-#define _CRIS_BITOPS_H13-14-/* Currently this is unsuitable for consumption outside the kernel. */15-#ifdef __KERNEL__ 16-17-#ifndef _LINUX_BITOPS_H18-#error only <linux/bitops.h> can be included directly19-#endif20-21-#include <asm/arch/bitops.h>22-#include <asm/system.h>23-#include <asm/atomic.h>24-#include <linux/compiler.h>25-26-/*27- * set_bit - Atomically set a bit in memory28- * @nr: the bit to set29- * @addr: the address to start counting from30- *31- * This function is atomic and may not be reordered. See __set_bit()32- * if you do not require the atomic guarantees.33- * Note that @nr may be almost arbitrarily large; this function is not34- * restricted to acting on a single-word quantity.35- */36-37-#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)38-39-/*40- * clear_bit - Clears a bit in memory41- * @nr: Bit to clear42- * @addr: Address to start counting from43- *44- * clear_bit() is atomic and may not be reordered. However, it does45- * not contain a memory barrier, so if it is used for locking purposes,46- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()47- * in order to ensure changes are visible on other processors.48- */49-50-#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)51-52-/*53- * change_bit - Toggle a bit in memory54- * @nr: Bit to change55- * @addr: Address to start counting from56- *57- * change_bit() is atomic and may not be reordered.58- * Note that @nr may be almost arbitrarily large; this function is not59- * restricted to acting on a single-word quantity.60- */61-62-#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)63-64-/**65- * test_and_set_bit - Set a bit and return its old value66- * @nr: Bit to set67- * @addr: Address to count from68- *69- * This operation is atomic and cannot be reordered. 70- * It also implies a memory barrier.71- */72-73-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)74-{75- unsigned int mask, retval;76- unsigned long flags;77- unsigned int *adr = (unsigned int *)addr;78-79- adr += nr >> 5;80- mask = 1 << (nr & 0x1f);81- cris_atomic_save(addr, flags);82- retval = (mask & *adr) != 0;83- *adr |= mask;84- cris_atomic_restore(addr, flags);85- return retval;86-}87-88-/*89- * clear_bit() doesn't provide any barrier for the compiler.90- */91-#define smp_mb__before_clear_bit() barrier()92-#define smp_mb__after_clear_bit() barrier()93-94-/**95- * test_and_clear_bit - Clear a bit and return its old value96- * @nr: Bit to clear97- * @addr: Address to count from98- *99- * This operation is atomic and cannot be reordered. 100- * It also implies a memory barrier.101- */102-103-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)104-{105- unsigned int mask, retval;106- unsigned long flags;107- unsigned int *adr = (unsigned int *)addr;108-109- adr += nr >> 5;110- mask = 1 << (nr & 0x1f);111- cris_atomic_save(addr, flags);112- retval = (mask & *adr) != 0;113- *adr &= ~mask;114- cris_atomic_restore(addr, flags);115- return retval;116-}117-118-/**119- * test_and_change_bit - Change a bit and return its old value120- * @nr: Bit to change121- * @addr: Address to count from122- *123- * This operation is atomic and cannot be reordered. 
124- * It also implies a memory barrier.125- */126-127-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)128-{129- unsigned int mask, retval;130- unsigned long flags;131- unsigned int *adr = (unsigned int *)addr;132- adr += nr >> 5;133- mask = 1 << (nr & 0x1f);134- cris_atomic_save(addr, flags);135- retval = (mask & *adr) != 0;136- *adr ^= mask;137- cris_atomic_restore(addr, flags);138- return retval;139-}140-141-#include <asm-generic/bitops/non-atomic.h>142-143-/*144- * Since we define it "external", it collides with the built-in145- * definition, which doesn't have the same semantics. We don't want to146- * use -fno-builtin, so just hide the name ffs.147- */148-#define ffs kernel_ffs149-150-#include <asm-generic/bitops/fls.h>151-#include <asm-generic/bitops/fls64.h>152-#include <asm-generic/bitops/hweight.h>153-#include <asm-generic/bitops/find.h>154-#include <asm-generic/bitops/lock.h>155-156-#include <asm-generic/bitops/ext2-non-atomic.h>157-158-#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)159-#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)160-161-#include <asm-generic/bitops/minix.h>162-#include <asm-generic/bitops/sched.h>163-164-#endif /* __KERNEL__ */165-166-#endif /* _CRIS_BITOPS_H */
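As a usage sketch of the primitives above (the flag word and helpers here are hypothetical, invented for illustration), the usual claim/release idiom:

/* test_and_set_bit() returns the old bit, so exactly one caller sees
 * 0 and wins; the barrier before clear_bit() is the one the comment
 * above prescribes when the bit is used for locking. */
static unsigned long demo_flags;
#define DEMO_BUSY 0

static int demo_try_claim(void)
{
	if (test_and_set_bit(DEMO_BUSY, &demo_flags))
		return -EBUSY;		/* someone else holds it */
	return 0;
}

static void demo_release(void)
{
	smp_mb__before_clear_bit();
	clear_bit(DEMO_BUSY, &demo_flags);
}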
···1-#ifndef _CRIS_BYTEORDER_H2-#define _CRIS_BYTEORDER_H3-4-#ifdef __GNUC__5-6-#ifdef __KERNEL__7-#include <asm/arch/byteorder.h>8-9-/* defines are necessary because the other files detect the presence10- * of a defined __arch_swab32, not an inline11- */12-#define __arch__swab32(x) ___arch__swab32(x)13-#define __arch__swab16(x) ___arch__swab16(x)14-#endif /* __KERNEL__ */15-16-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)17-# define __BYTEORDER_HAS_U64__18-# define __SWAB_64_THRU_32__19-#endif20-21-#endif /* __GNUC__ */22-23-#include <linux/byteorder/little_endian.h>24-25-#endif26-27-
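For concreteness, the effect of the arch swab hooks pulled in above, as worked values:

/* Worked values, for illustration:
 *   ___arch__swab32(0x12345678) == 0x78563412
 *   ___arch__swab16(0xabcd)     == 0xcdab
 * With <linux/byteorder/little_endian.h> included above, this makes
 * cpu_to_le32() an identity and cpu_to_be32() the byte swap. */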
···1-/* TODO: csum_tcpudp_magic could be speeded up, and csum_fold as well */2-3-#ifndef _CRIS_CHECKSUM_H4-#define _CRIS_CHECKSUM_H5-6-#include <asm/arch/checksum.h>7-8-/*9- * computes the checksum of a memory block at buff, length len,10- * and adds in "sum" (32-bit)11- *12- * returns a 32-bit number suitable for feeding into itself13- * or csum_tcpudp_magic14- *15- * this function must be called with even lengths, except16- * for the last fragment, which may be odd17- *18- * it's best to have buff aligned on a 32-bit boundary19- */20-__wsum csum_partial(const void *buff, int len, __wsum sum);21-22-/*23- * the same as csum_partial, but copies from src while it24- * checksums25- *26- * here even more important to align src and dst on a 32-bit (or even27- * better 64-bit) boundary28- */29-30-__wsum csum_partial_copy_nocheck(const void *src, void *dst,31- int len, __wsum sum);32-33-/*34- * Fold a partial checksum into a word35- */36-37-static inline __sum16 csum_fold(__wsum csum)38-{39- u32 sum = (__force u32)csum;40- sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */41- sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */42- return (__force __sum16)~sum;43-}44-45-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,46- int len, __wsum sum,47- int *errptr);48-49-/*50- * This is a version of ip_compute_csum() optimized for IP headers,51- * which always checksum on 4 octet boundaries.52- *53- */54-55-static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)56-{57- return csum_fold(csum_partial(iph, ihl * 4, 0));58-}59-60-/*61- * computes the checksum of the TCP/UDP pseudo-header62- * returns a 16-bit checksum, already complemented63- */64-65-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,66- unsigned short len,67- unsigned short proto,68- __wsum sum)69-{70- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));71-}72-73-/*74- * this routine is used for miscellaneous IP-like checksums, mainly75- * in icmp.c76- */77-78-static inline __sum16 ip_compute_csum(const void *buff, int len)79-{80- return csum_fold (csum_partial(buff, len, 0));81-}82-83-#endif
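The double fold in csum_fold() above is clearest with a concrete value; a worked example under the same end-around-carry arithmetic:

/* Worked example: csum_fold(0x899002df)
 *   0x02df + 0x8990 = 0x8c6f   (first fold)
 *   0x8c6f + 0x0000 = 0x8c6f   (second fold: carry already absorbed)
 *   ~0x8c6f & 0xffff = 0x7390  (returned, complemented checksum) */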
···1-#ifndef _CRIS_DELAY_H2-#define _CRIS_DELAY_H3-4-/*5- * Copyright (C) 1998-2002 Axis Communications AB6- *7- * Delay routines, using a pre-computed "loops_per_second" value.8- */9-10-#include <asm/arch/delay.h>11-12-/* Use only for very small delays ( < 1 msec). */13-14-extern unsigned long loops_per_usec; /* arch/cris/mm/init.c */15-16-/* May be defined by arch/delay.h. */17-#ifndef udelay18-static inline void udelay(unsigned long usecs)19-{20- __delay(usecs * loops_per_usec);21-}22-#endif23-24-#endif /* defined(_CRIS_DELAY_H) */25-26-27-
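The udelay() scaling above is plain multiplication; for example (the calibration value here is made up):

/* Example: if calibration set loops_per_usec == 50, then
 * udelay(100) executes __delay(100 * 50) == 5000 busy-wait
 * iterations, i.e. roughly 100 microseconds. */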
···1-/* $Id: dma.h,v 1.2 2001/05/09 12:17:42 johana Exp $ */2-3-#ifndef _ASM_DMA_H4-#define _ASM_DMA_H5-6-#include <asm/arch/dma.h>7-8-/* it's useless on the Etrax, but unfortunately needed by the new9- bootmem allocator (this value should suffice for that) */10-11-#define MAX_DMA_ADDRESS PAGE_OFFSET12-13-/* From PCI */14-15-#ifdef CONFIG_PCI16-extern int isa_dma_bridge_buggy;17-#else18-#define isa_dma_bridge_buggy (0)19-#endif20-21-#endif /* _ASM_DMA_H */
-93
include/asm-cris/elf.h
···1-#ifndef __ASMCRIS_ELF_H2-#define __ASMCRIS_ELF_H3-4-/*5- * ELF register definitions..6- */7-8-#include <asm/user.h>9-10-#define R_CRIS_NONE 011-#define R_CRIS_8 112-#define R_CRIS_16 213-#define R_CRIS_32 314-#define R_CRIS_8_PCREL 415-#define R_CRIS_16_PCREL 516-#define R_CRIS_32_PCREL 617-#define R_CRIS_GNU_VTINHERIT 718-#define R_CRIS_GNU_VTENTRY 819-#define R_CRIS_COPY 920-#define R_CRIS_GLOB_DAT 1021-#define R_CRIS_JUMP_SLOT 1122-#define R_CRIS_RELATIVE 1223-#define R_CRIS_16_GOT 1324-#define R_CRIS_32_GOT 1425-#define R_CRIS_16_GOTPLT 1526-#define R_CRIS_32_GOTPLT 1627-#define R_CRIS_32_GOTREL 1728-#define R_CRIS_32_PLT_GOTREL 1829-#define R_CRIS_32_PLT_PCREL 1930-31-typedef unsigned long elf_greg_t;32-33-/* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is34- thus exposed to user-space. */35-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))36-typedef elf_greg_t elf_gregset_t[ELF_NGREG];37-38-/* A placeholder; CRIS does not have any fp regs. */39-typedef unsigned long elf_fpregset_t;40-41-/*42- * These are used to set parameters in the core dumps.43- */44-#define ELF_CLASS ELFCLASS3245-#define ELF_DATA ELFDATA2LSB46-#define ELF_ARCH EM_CRIS47-48-#include <asm/arch/elf.h>49-50-/* The master for these definitions is {binutils}/include/elf/cris.h: */51-/* User symbols in this file have a leading underscore. */52-#define EF_CRIS_UNDERSCORE 0x0000000153-54-/* This is a mask for different incompatible machine variants. */55-#define EF_CRIS_VARIANT_MASK 0x0000000e56-57-/* Variant 0; may contain v0..10 object. */58-#define EF_CRIS_VARIANT_ANY_V0_V10 0x0000000059-60-/* Variant 1; contains v32 object. */61-#define EF_CRIS_VARIANT_V32 0x0000000262-63-/* Variant 2; contains object compatible with v32 and v10. */64-#define EF_CRIS_VARIANT_COMMON_V10_V32 0x0000000465-/* End of excerpt from {binutils}/include/elf/cris.h. */66-67-#define USE_ELF_CORE_DUMP68-69-#define ELF_EXEC_PAGESIZE 819270-71-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical72- use of this is to invoke "./ld.so someprog" to test out a new version of73- the loader. We need to make sure that it is out of the way of the program74- that it will "exec", and that there is sufficient room for the brk. */75-76-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)77-78-/* This yields a mask that user programs can use to figure out what79- instruction set this CPU supports. This could be done in user space,80- but it's not easy, and we've already done it here. */81-82-#define ELF_HWCAP (0)83-84-/* This yields a string that ld.so will use to load implementation85- specific libraries for optimization. This is more specific in86- intent than poking at uname or /proc/cpuinfo.87-*/88-89-#define ELF_PLATFORM (NULL)90-91-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)92-93-#endif
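A hypothetical sketch of how the variant bits above would be tested on a v10 kernel (the kernel's actual gate is the elf_check_arch() macro; this helper is invented for illustration):

static int demo_variant_ok_for_v10(unsigned long e_flags)
{
	switch (e_flags & EF_CRIS_VARIANT_MASK) {
	case EF_CRIS_VARIANT_ANY_V0_V10:
	case EF_CRIS_VARIANT_COMMON_V10_V32:
		return 1;	/* object can run on a v10 CPU */
	default:
		return 0;	/* e.g. EF_CRIS_VARIANT_V32 only */
	}
}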
···1-#ifndef _CRIS_PAGE_H2-#define _CRIS_PAGE_H3-4-#include <asm/arch/page.h>5-#include <linux/const.h>6-7-/* PAGE_SHIFT determines the page size */8-#define PAGE_SHIFT 139-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)10-#define PAGE_MASK (~(PAGE_SIZE-1))11-12-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)13-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)14-15-#define clear_user_page(page, vaddr, pg) clear_page(page)16-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)17-18-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \19- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)20-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE21-22-/*23- * These are used to make use of C type-checking..24- */25-#ifndef __ASSEMBLY__26-typedef struct { unsigned long pte; } pte_t;27-typedef struct { unsigned long pgd; } pgd_t;28-typedef struct { unsigned long pgprot; } pgprot_t;29-typedef struct page *pgtable_t;30-#endif31-32-#define pte_val(x) ((x).pte)33-#define pgd_val(x) ((x).pgd)34-#define pgprot_val(x) ((x).pgprot)35-36-#define __pte(x) ((pte_t) { (x) } )37-#define __pgd(x) ((pgd_t) { (x) } )38-#define __pgprot(x) ((pgprot_t) { (x) } )39-40-/* On CRIS the PFN numbers don't start at 0 so we have to compensate */41-/* for that before indexing into the page table starting at mem_map */42-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)43-#define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr)44-45-/* to index into the page map. our pages all start at physical addr PAGE_OFFSET so46- * we can let the map start there. notice that we subtract PAGE_OFFSET because47- * we start our mem_map there - in other ports they map mem_map physically and48- * use __pa instead. in our system both the physical and virtual address of DRAM49- * is too high to let mem_map start at 0, so we do it this way instead (similar50- * to arm and m68k I think)51- */ 52-53-#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT))54-#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)55-#define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT)56-57-/* convert a page (based on mem_map and forward) to a physical address58- * do this by figuring out the virtual address and then use __pa59- */60-61-#define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)62-63-#ifndef __ASSEMBLY__64-65-#endif /* __ASSEMBLY__ */66-67-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \68- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)69-70-#include <asm-generic/memory_model.h>71-#include <asm-generic/page.h>72-73-#endif /* _CRIS_PAGE_H */74-
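Because the PFN macros above compensate for a nonzero DRAM base, a worked example helps. Assuming PAGE_OFFSET is 0xc0000000 (the kernel DRAM window that pgtable.h below calls the 0xc0xxxxxx addresses):

/* Worked example (assumes PAGE_OFFSET == 0xc0000000, PAGE_SHIFT 13):
 *   ARCH_PFN_OFFSET          = 0xc0000000 >> 13 = 0x60000
 *   virt_to_page(0xc0004000) = mem_map + (0x4000 >> 13)
 *                            = mem_map + 2      (third 8 KB page)
 *   page_to_phys(mem_map + 2) = __pa((2 << 13) + 0xc0000000)
 *                             = __pa(0xc0004000)
 */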
···1-/*2- * CRIS pgtable.h - macros and functions to manipulate page tables.3- */4-5-#ifndef _CRIS_PGTABLE_H6-#define _CRIS_PGTABLE_H7-8-#include <asm/page.h>9-#include <asm-generic/pgtable-nopmd.h>10-11-#ifndef __ASSEMBLY__12-#include <linux/sched.h>13-#include <asm/mmu.h>14-#endif15-#include <asm/arch/pgtable.h>16-17-/*18- * The Linux memory management assumes a three-level page table setup. On19- * CRIS, we use that, but "fold" the mid level into the top-level page20- * table. Since the MMU TLB is software loaded through an interrupt, it21- * supports any page table structure, so we could have used a three-level22- * setup, but for the amounts of memory we normally use, a two-level is23- * probably more efficient.24- *25- * This file contains the functions and defines necessary to modify and use26- * the CRIS page table tree.27- */28-#ifndef __ASSEMBLY__29-extern void paging_init(void);30-#endif31-32-/* Certain architectures need to do special things when pte's33- * within a page table are directly modified. Thus, the following34- * hook is made available.35- */36-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))37-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)38-39-/*40- * (pmds are folded into pgds so this doesn't get actually called,41- * but the define is needed for a generic inline function.)42- */43-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)44-#define set_pgu(pudptr, pudval) (*(pudptr) = pudval)45-46-/* PGDIR_SHIFT determines the size of the area a second-level page table can47- * map. It is equal to the page size times the number of PTE's that fit in48- * a PMD page. A PTE is 4-bytes in CRIS. Hence the following number.49- */50-51-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))52-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)53-#define PGDIR_MASK (~(PGDIR_SIZE-1))54-55-/*56- * entries per page directory level: we use a two-level, so57- * we don't really have any PMD directory physically.58- * pointers are 4 bytes so we can use the page size and 59- * divide it by 4 (shift by 2).60- */61-#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))62-#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))63-64-/* calculate how many PGD entries a user-level program can use65- * the first mappable virtual address is 066- * (TASK_SIZE is the maximum virtual address space)67- */68-69-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)70-#define FIRST_USER_ADDRESS 071-72-/* zero page used for uninitialized stuff */73-#ifndef __ASSEMBLY__74-extern unsigned long empty_zero_page;75-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))76-#endif77-78-/* number of bits that fit into a memory pointer */79-#define BITS_PER_PTR (8*sizeof(unsigned long))80-81-/* to align the pointer to a pointer address */82-#define PTR_MASK (~(sizeof(void*)-1))83-84-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */85-/* 64-bit machines, beware! SRB. 
*/86-#define SIZEOF_PTR_LOG2 287-88-/* to find an entry in a page-table */89-#define PAGE_PTR(address) \90-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)91-92-/* to set the page-dir */93-#define SET_PAGE_DIR(tsk,pgdir)94-95-#define pte_none(x) (!pte_val(x))96-#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)97-#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)98-99-#define pmd_none(x) (!pmd_val(x))100-/* by removing the _PAGE_KERNEL bit from the comparison, the same pmd_bad101- * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries.102- */103-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE)104-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)105-#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)106-107-#ifndef __ASSEMBLY__108-109-/*110- * The following only work if pte_present() is true.111- * Undefined behaviour if not..112- */113-114-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }115-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }116-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }117-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }118-static inline int pte_special(pte_t pte) { return 0; }119-120-static inline pte_t pte_wrprotect(pte_t pte)121-{122- pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);123- return pte;124-}125-126-static inline pte_t pte_mkclean(pte_t pte)127-{128- pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); 129- return pte; 130-}131-132-static inline pte_t pte_mkold(pte_t pte)133-{134- pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);135- return pte;136-}137-138-static inline pte_t pte_mkwrite(pte_t pte)139-{140- pte_val(pte) |= _PAGE_WRITE;141- if (pte_val(pte) & _PAGE_MODIFIED)142- pte_val(pte) |= _PAGE_SILENT_WRITE;143- return pte;144-}145-146-static inline pte_t pte_mkdirty(pte_t pte)147-{148- pte_val(pte) |= _PAGE_MODIFIED;149- if (pte_val(pte) & _PAGE_WRITE)150- pte_val(pte) |= _PAGE_SILENT_WRITE;151- return pte;152-}153-154-static inline pte_t pte_mkyoung(pte_t pte)155-{156- pte_val(pte) |= _PAGE_ACCESSED;157- if (pte_val(pte) & _PAGE_READ)158- {159- pte_val(pte) |= _PAGE_SILENT_READ;160- if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) ==161- (_PAGE_WRITE | _PAGE_MODIFIED))162- pte_val(pte) |= _PAGE_SILENT_WRITE;163- }164- return pte;165-}166-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }167-168-/*169- * Conversion functions: convert a page and protection to a page entry,170- * and a page entry and page directory to the page they refer to.171- */172-173-/* What actually goes as arguments to the various functions is less than174- * obvious, but a rule of thumb is that struct page's go as struct page *,175- * really physical DRAM addresses are unsigned long's, and DRAM "virtual"176- * addresses (the 0xc0xxxxxx's) go as void *'s.177- */178-179-static inline pte_t __mk_pte(void * page, pgprot_t pgprot)180-{181- pte_t pte;182- /* the PTE needs a physical address */183- pte_val(pte) = __pa(page) | pgprot_val(pgprot);184- return pte;185-}186-187-#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))188-189-#define mk_pte_phys(physpage, pgprot) \190-({ \191- pte_t __pte; \192- \193- pte_val(__pte) = (physpage) + pgprot_val(pgprot); \194- __pte; \195-})196-197-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)198-{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; 
}199-200-201-/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval202- * __pte_page(pte_val) refers to the "virtual" DRAM interval203- * pte_pagenr refers to the page-number counted starting from the virtual DRAM start204- */205-206-static inline unsigned long __pte_page(pte_t pte)207-{208- /* the PTE contains a physical address */209- return (unsigned long)__va(pte_val(pte) & PAGE_MASK);210-}211-212-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)213-214-/* permanent address of a page */215-216-#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))217-#define pte_page(pte) (mem_map+pte_pagenr(pte))218-219-/* only the pte's themselves need to point to physical DRAM (see above)220- * the pagetable links are purely handled within the kernel SW and thus221- * don't need the __pa and __va transformations.222- */223-224-static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)225-{ pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; }226-227-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))228-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))229-230-/* to find an entry in a page-table-directory. */231-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))232-233-/* to find an entry in a page-table-directory */234-static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)235-{236- return mm->pgd + pgd_index(address);237-}238-239-/* to find an entry in a kernel page-table-directory */240-#define pgd_offset_k(address) pgd_offset(&init_mm, address)241-242-/* Find an entry in the third-level page table.. */243-#define __pte_offset(address) \244- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))245-#define pte_offset_kernel(dir, address) \246- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))247-#define pte_offset_map(dir, address) \248- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))249-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)250-251-#define pte_unmap(pte) do { } while (0)252-#define pte_unmap_nested(pte) do { } while (0)253-#define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)254-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))255-256-#define pte_ERROR(e) \257- printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))258-#define pgd_ERROR(e) \259- printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))260-261-262-extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */263-264-/*265- * CRIS doesn't have any external MMU info: the kernel page266- * tables contain all the necessary information.267- * 268- * Actually I am not sure on what this could be used for.269- */270-static inline void update_mmu_cache(struct vm_area_struct * vma,271- unsigned long address, pte_t pte)272-{273-}274-275-/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */276-/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */277-278-#define __swp_type(x) (((x).val >> 5) & 0x7f)279-#define __swp_offset(x) ((x).val >> 12)280-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 5) | ((offset) << 12) })281-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })282-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })283-284-#define kern_addr_valid(addr) (1)285-286-#include <asm-generic/pgtable.h>287-288-/*289- * No page table caches to initialise290- */291-#define pgtable_cache_init() do { } 
while (0)292-293-#define pte_to_pgoff(x) (pte_val(x) >> 6)294-#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)295-296-typedef pte_t *pte_addr_t;297-298-#endif /* __ASSEMBLY__ */299-#endif /* _CRIS_PGTABLE_H */
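Composed, the helpers above give the usual (folded) table walk; a minimal hypothetical sketch for a kernel virtual address, assuming the asm-generic pud/pmd folding pulled in at the top of this header:

/* Hypothetical walk using the helpers above; the pud/pmd levels are
 * folded on CRIS, so the intermediate steps compile away. */
static pte_t *demo_lookup_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud = pud_offset(pgd, address);
	pmd_t *pmd = pmd_offset(pud, address);

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;	/* no PTE page mapped here */
	return pte_offset_kernel(pmd, address);
}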
···1-/*2- * include/asm-cris/processor.h3- *4- * Copyright (C) 2000, 2001 Axis Communications AB5- *6- * Authors: Bjorn Wesen Initial version7- *8- */9-10-#ifndef __ASM_CRIS_PROCESSOR_H11-#define __ASM_CRIS_PROCESSOR_H12-13-#include <asm/system.h>14-#include <asm/page.h>15-#include <asm/ptrace.h>16-#include <asm/arch/processor.h>17-18-struct task_struct;19-20-#define STACK_TOP TASK_SIZE21-#define STACK_TOP_MAX STACK_TOP22-23-/* This decides where the kernel will search for a free chunk of vm24- * space during mmap's.25- */26-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))27-28-/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.29- * normally, the stack is found by doing something like p + THREAD_SIZE30- * in CRIS, a page is 8192 bytes, which seems like a sane size31- */32-33-#define THREAD_SIZE PAGE_SIZE34-#define KERNEL_STACK_SIZE PAGE_SIZE35-36-/*37- * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.38- * This macro allows us to find those regs for a task.39- * Notice that subsequent pt_regs stackings, like recursive interrupts occurring while40- * we're in the kernel, won't affect this - only the first user->kernel transition41- * registers are reached by this.42- */43-44-#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1)45-46-/*47- * Ditto, but for the currently running task48- */49-50-#define task_pt_regs(task) user_regs(task_thread_info(task))51-#define current_regs() task_pt_regs(current)52-53-static inline void prepare_to_copy(struct task_struct *tsk)54-{55-}56-57-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);58-59-unsigned long get_wchan(struct task_struct *p);60-61-#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)62-63-extern unsigned long thread_saved_pc(struct task_struct *tsk);64-65-/* Free all resources held by a thread. */66-static inline void release_thread(struct task_struct *dead_task)67-{68- /* Nothing needs to be done. */69-}70-71-#define init_stack (init_thread_union.stack)72-73-#define cpu_relax() barrier()74-75-#endif /* __ASM_CRIS_PROCESSOR_H */
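The user_regs() arithmetic above amounts to a fixed stack picture; an illustrative layout, not code from the patch:

/* Layout implied by user_regs() above, with THREAD_SIZE == 8192:
 *
 *   p (thread_info)                             low addresses
 *   ...
 *   kernel stack, growing downward
 *   first user->kernel pt_regs frame at
 *       p + THREAD_SIZE - sizeof(struct pt_regs)
 *
 * i.e. ((struct pt_regs *)(p + THREAD_SIZE)) - 1; nested frames are
 * pushed below it, so this location stays valid for the task. */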
···1-#ifndef _CRIS_PTRACE_H2-#define _CRIS_PTRACE_H3-4-#include <asm/arch/ptrace.h>5-6-#ifdef __KERNEL__7-8-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */9-#define PTRACE_GETREGS 1210-#define PTRACE_SETREGS 1311-12-#define profile_pc(regs) instruction_pointer(regs)13-14-#endif /* __KERNEL__ */15-16-#endif /* _CRIS_PTRACE_H */
···1-#ifndef __ASM_CRIS_SYSTEM_H2-#define __ASM_CRIS_SYSTEM_H3-4-#include <asm/arch/system.h>5-6-/* the switch_to macro calls resume, an asm function in entry.S which does the actual7- * task switching.8- */9-10-extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);11-#define switch_to(prev,next,last) last = resume(prev,next, \12- (int)&((struct task_struct *)0)->thread)13-14-#define barrier() __asm__ __volatile__("": : :"memory")15-#define mb() barrier()16-#define rmb() mb()17-#define wmb() mb()18-#define read_barrier_depends() do { } while(0)19-#define set_mb(var, value) do { var = value; mb(); } while (0)20-21-#ifdef CONFIG_SMP22-#define smp_mb() mb()23-#define smp_rmb() rmb()24-#define smp_wmb() wmb()25-#define smp_read_barrier_depends() read_barrier_depends()26-#else27-#define smp_mb() barrier()28-#define smp_rmb() barrier()29-#define smp_wmb() barrier()30-#define smp_read_barrier_depends() do { } while(0)31-#endif32-33-#define iret()34-35-/*36- * disable hlt during certain critical i/o operations37- */38-#define HAVE_DISABLE_HLT39-void disable_hlt(void);40-void enable_hlt(void);41-42-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)43-{44- /* since Etrax doesn't have any atomic xchg instructions, we need to disable45- irq's (if enabled) and do it with move.d's */46- unsigned long flags,temp;47- local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */48- switch (size) {49- case 1:50- *((unsigned char *)&temp) = x;51- x = *(unsigned char *)ptr;52- *(unsigned char *)ptr = *((unsigned char *)&temp);53- break;54- case 2:55- *((unsigned short *)&temp) = x;56- x = *(unsigned short *)ptr;57- *(unsigned short *)ptr = *((unsigned short *)&temp);58- break;59- case 4:60- temp = x;61- x = *(unsigned long *)ptr;62- *(unsigned long *)ptr = temp;63- break;64- }65- local_irq_restore(flags); /* restore irq enable bit */66- return x;67-}68-69-#include <asm-generic/cmpxchg-local.h>70-71-/*72- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make73- * them available.74- */75-#define cmpxchg_local(ptr, o, n) \76- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\77- (unsigned long)(n), sizeof(*(ptr))))78-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))79-80-#ifndef CONFIG_SMP81-#include <asm-generic/cmpxchg.h>82-#endif83-84-#define arch_align_stack(x) (x)85-86-void default_idle(void);87-88-#endif
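A small usage sketch of __xchg() above (the pending-pointer handoff is hypothetical, invented for illustration):

/* Single-slot handoff: atomically take the pending item, if any.
 * sizeof(void *) == 4 selects the move.d path, run with IRQs off. */
static void *demo_pending;

static void *demo_take_pending(void)
{
	return (void *)__xchg(0UL, &demo_pending, sizeof(demo_pending));
}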
···1-/* thread_info.h: CRIS low-level thread information2- *3- * Copyright (C) 2002 David Howells (dhowells@redhat.com)4- * - Incorporating suggestions made by Linus Torvalds and Dave Miller5- * 6- * CRIS port by Axis Communications7- */8-9-#ifndef _ASM_THREAD_INFO_H10-#define _ASM_THREAD_INFO_H11-12-#ifdef __KERNEL__13-14-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR15-16-#ifndef __ASSEMBLY__17-#include <asm/types.h>18-#include <asm/processor.h>19-#include <asm/arch/thread_info.h>20-#include <asm/segment.h>21-#endif22-23-24-/*25- * low level task data that entry.S needs immediate access to26- * - this struct should fit entirely inside of one cache line27- * - this struct shares the supervisor stack pages28- * - if the contents of this structure are changed, the assembly constants must also be changed29- */30-#ifndef __ASSEMBLY__31-struct thread_info {32- struct task_struct *task; /* main task structure */33- struct exec_domain *exec_domain; /* execution domain */34- unsigned long flags; /* low level flags */35- __u32 cpu; /* current CPU */36- int preempt_count; /* 0 => preemptable, <0 => BUG */37- __u32 tls; /* TLS for this thread */38-39- mm_segment_t addr_limit; /* thread address space:40- 0-0xBFFFFFFF for user-thread41- 0-0xFFFFFFFF for kernel-thread42- */43- struct restart_block restart_block;44- __u8 supervisor_stack[0];45-};46-47-#endif48-49-#define PREEMPT_ACTIVE 0x1000000050-51-/*52- * macros/functions for gaining access to the thread information structure53- *54- * preempt_count needs to be 1 initially, until the scheduler is functional.55- */56-#ifndef __ASSEMBLY__57-#define INIT_THREAD_INFO(tsk) \58-{ \59- .task = &tsk, \60- .exec_domain = &default_exec_domain, \61- .flags = 0, \62- .cpu = 0, \63- .preempt_count = 1, \64- .addr_limit = KERNEL_DS, \65- .restart_block = { \66- .fn = do_no_restart_syscall, \67- }, \68-}69-70-#define init_thread_info (init_thread_union.thread_info)71-72-/* thread information allocation */73-#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))74-#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)75-76-#endif /* !__ASSEMBLY__ */77-78-/*79- * thread information flags80- * - these are process state flags that various assembly files may need to access81- * - pending work-to-be-done flags are in LSW82- * - other flags in MSW83- */84-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */85-#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */86-#define TIF_SIGPENDING 2 /* signal pending */87-#define TIF_NEED_RESCHED 3 /* rescheduling necessary */88-#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */89-#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */90-#define TIF_MEMDIE 1791-#define TIF_FREEZE 18 /* is freezing for suspend */92-93-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)94-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)95-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)96-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)97-#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)98-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)99-#define _TIF_FREEZE (1<<TIF_FREEZE)100-101-#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */102-#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */103-104-#endif /* __KERNEL__ */105-106-#endif /* _ASM_THREAD_INFO_H */
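The LSW/MSW split above is what the two masks encode; a C restatement of the return-path test (illustrative only; the real test lives in entry.S assembly):

/* Any work bit pending before returning to user space? */
static int demo_work_pending(struct thread_info *ti)
{
	return (ti->flags & _TIF_ALLWORK_MASK) != 0;
}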
···1-/*2- * linux/include/asm-cris/timex.h3- *4- * CRIS architecture timex specifications5- */6-7-#ifndef _ASM_CRIS_TIMEX_H8-#define _ASM_CRIS_TIMEX_H9-10-#include <asm/arch/timex.h>11-12-/*13- * We don't have a cycle counter, but we do not support SMP (where this is14- * used) anyway, so it does not matter.15- */16-17-typedef unsigned long long cycles_t;18-19-static inline cycles_t get_cycles(void)20-{21- return 0;22-}23-24-#endif
-19
include/asm-cris/tlb.h
···1-#ifndef _CRIS_TLB_H2-#define _CRIS_TLB_H3-4-#include <linux/pagemap.h>5-6-#include <asm/arch/tlb.h>7-8-/*9- * cris doesn't need any special per-pte or10- * per-vma handling..11- */12-#define tlb_start_vma(tlb, vma) do { } while (0)13-#define tlb_end_vma(tlb, vma) do { } while (0)14-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)15-16-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)17-#include <asm-generic/tlb.h>18-19-#endif
···1-/* 2- * Authors: Bjorn Wesen (bjornw@axis.com)3- * Hans-Peter Nilsson (hp@axis.com)4- */5-6-/* Asm:s have been tweaked (within the domain of correctness) to give7- satisfactory results for "gcc version 2.96 20000427 (experimental)".8-9- Check regularly...10-11- Register $r9 is chosen for temporaries, being a call-clobbered register12- first in line to be used (notably for local blocks), not colliding with13- parameter registers. */14-15-#ifndef _CRIS_UACCESS_H16-#define _CRIS_UACCESS_H17-18-#ifndef __ASSEMBLY__19-#include <linux/sched.h>20-#include <linux/errno.h>21-#include <asm/processor.h>22-#include <asm/page.h>23-24-#define VERIFY_READ 025-#define VERIFY_WRITE 126-27-/*28- * The fs value determines whether argument validity checking should be29- * performed or not. If get_fs() == USER_DS, checking is performed, with30- * get_fs() == KERNEL_DS, checking is bypassed.31- *32- * For historical reasons, these macros are grossly misnamed.33- */34-35-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })36-37-/* addr_limit is the maximum accessible address for the task. we misuse38- * the KERNEL_DS and USER_DS values to both assign and compare the 39- * addr_limit values through the equally misnamed get/set_fs macros.40- * (see above)41- */42-43-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)44-#define USER_DS MAKE_MM_SEG(TASK_SIZE)45-46-#define get_ds() (KERNEL_DS)47-#define get_fs() (current_thread_info()->addr_limit)48-#define set_fs(x) (current_thread_info()->addr_limit = (x))49-50-#define segment_eq(a,b) ((a).seg == (b).seg)51-52-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))53-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))54-#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))55-#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))56-57-#include <asm/arch/uaccess.h>58-59-/*60- * The exception table consists of pairs of addresses: the first is the61- * address of an instruction that is allowed to fault, and the second is62- * the address at which the program should continue. No registers are63- * modified, so it is entirely up to the continuation code to figure out64- * what to do.65- *66- * All the routines below use bits of fixup code that are out of line67- * with the main instruction path. This means when everything is well,68- * we don't even have to jump over them. Further, they do not intrude69- * on our cache or tlb entries.70- */71-72-struct exception_table_entry73-{74- unsigned long insn, fixup;75-};76-77-/*78- * These are the main single-value transfer routines. They automatically79- * use the right size if we just have the right pointer type.80- *81- * This gets kind of ugly. We want to return _two_ values in "get_user()"82- * and yet we don't want to do any pointers, because that is too much83- * of a performance impact. Thus we have a few rather ugly macros here,84- * and hide all the ugliness from the user.85- *86- * The "__xxx" versions of the user access functions are versions that87- * do not verify the address space, that must have been done previously88- * with a separate "access_ok()" call (this is used when we do multiple89- * accesses to the same area of user memory).90- *91- * As we use the same address space for kernel and user data on92- * CRIS, we can just do these as direct assignments. 
(Of course, the93- * exception handling means that it's no longer "just"...)94- */95-#define get_user(x,ptr) \96- __get_user_check((x),(ptr),sizeof(*(ptr)))97-#define put_user(x,ptr) \98- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))99-100-#define __get_user(x,ptr) \101- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))102-#define __put_user(x,ptr) \103- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))104-105-extern long __put_user_bad(void);106-107-#define __put_user_size(x,ptr,size,retval) \108-do { \109- retval = 0; \110- switch (size) { \111- case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \112- case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \113- case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \114- case 8: __put_user_asm_64(x,ptr,retval); break; \115- default: __put_user_bad(); \116- } \117-} while (0)118-119-#define __get_user_size(x,ptr,size,retval) \120-do { \121- retval = 0; \122- switch (size) { \123- case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \124- case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \125- case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \126- case 8: __get_user_asm_64(x,ptr,retval); break; \127- default: (x) = __get_user_bad(); \128- } \129-} while (0)130-131-#define __put_user_nocheck(x,ptr,size) \132-({ \133- long __pu_err; \134- __put_user_size((x),(ptr),(size),__pu_err); \135- __pu_err; \136-})137-138-#define __put_user_check(x,ptr,size) \139-({ \140- long __pu_err = -EFAULT; \141- __typeof__(*(ptr)) *__pu_addr = (ptr); \142- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \143- __put_user_size((x),__pu_addr,(size),__pu_err); \144- __pu_err; \145-})146-147-struct __large_struct { unsigned long buf[100]; };148-#define __m(x) (*(struct __large_struct *)(x))149-150-151-152-#define __get_user_nocheck(x,ptr,size) \153-({ \154- long __gu_err, __gu_val; \155- __get_user_size(__gu_val,(ptr),(size),__gu_err); \156- (x) = (__typeof__(*(ptr)))__gu_val; \157- __gu_err; \158-})159-160-#define __get_user_check(x,ptr,size) \161-({ \162- long __gu_err = -EFAULT, __gu_val = 0; \163- const __typeof__(*(ptr)) *__gu_addr = (ptr); \164- if (access_ok(VERIFY_READ,__gu_addr,size)) \165- __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \166- (x) = (__typeof__(*(ptr)))__gu_val; \167- __gu_err; \168-})169-170-extern long __get_user_bad(void);171-172-/* More complex functions. 
Most are inline, but some call functions that173- live in lib/usercopy.c */174-175-extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);176-extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);177-extern unsigned long __do_clear_user(void __user *to, unsigned long n);178-179-static inline unsigned long180-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)181-{182- if (access_ok(VERIFY_WRITE, to, n))183- return __copy_user(to,from,n);184- return n;185-}186-187-static inline unsigned long188-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)189-{190- if (access_ok(VERIFY_READ, from, n))191- return __copy_user_zeroing(to,from,n);192- return n;193-}194-195-static inline unsigned long196-__generic_clear_user(void __user *to, unsigned long n)197-{198- if (access_ok(VERIFY_WRITE, to, n))199- return __do_clear_user(to,n);200- return n;201-}202-203-static inline long204-__strncpy_from_user(char *dst, const char __user *src, long count)205-{206- return __do_strncpy_from_user(dst, src, count);207-}208-209-static inline long210-strncpy_from_user(char *dst, const char __user *src, long count)211-{212- long res = -EFAULT;213- if (access_ok(VERIFY_READ, src, 1))214- res = __do_strncpy_from_user(dst, src, count);215- return res;216-}217-218-219-/* Note that these expand awfully if made into switch constructs, so220- don't do that. */221-222-static inline unsigned long223-__constant_copy_from_user(void *to, const void __user *from, unsigned long n)224-{225- unsigned long ret = 0;226- if (n == 0)227- ;228- else if (n == 1)229- __asm_copy_from_user_1(to, from, ret);230- else if (n == 2)231- __asm_copy_from_user_2(to, from, ret);232- else if (n == 3)233- __asm_copy_from_user_3(to, from, ret);234- else if (n == 4)235- __asm_copy_from_user_4(to, from, ret);236- else if (n == 5)237- __asm_copy_from_user_5(to, from, ret);238- else if (n == 6)239- __asm_copy_from_user_6(to, from, ret);240- else if (n == 7)241- __asm_copy_from_user_7(to, from, ret);242- else if (n == 8)243- __asm_copy_from_user_8(to, from, ret);244- else if (n == 9)245- __asm_copy_from_user_9(to, from, ret);246- else if (n == 10)247- __asm_copy_from_user_10(to, from, ret);248- else if (n == 11)249- __asm_copy_from_user_11(to, from, ret);250- else if (n == 12)251- __asm_copy_from_user_12(to, from, ret);252- else if (n == 13)253- __asm_copy_from_user_13(to, from, ret);254- else if (n == 14)255- __asm_copy_from_user_14(to, from, ret);256- else if (n == 15)257- __asm_copy_from_user_15(to, from, ret);258- else if (n == 16)259- __asm_copy_from_user_16(to, from, ret);260- else if (n == 20)261- __asm_copy_from_user_20(to, from, ret);262- else if (n == 24)263- __asm_copy_from_user_24(to, from, ret);264- else265- ret = __generic_copy_from_user(to, from, n);266-267- return ret;268-}269-270-/* Ditto, don't make a switch out of this. 
*/271-272-static inline unsigned long273-__constant_copy_to_user(void __user *to, const void *from, unsigned long n)274-{275- unsigned long ret = 0;276- if (n == 0)277- ;278- else if (n == 1)279- __asm_copy_to_user_1(to, from, ret);280- else if (n == 2)281- __asm_copy_to_user_2(to, from, ret);282- else if (n == 3)283- __asm_copy_to_user_3(to, from, ret);284- else if (n == 4)285- __asm_copy_to_user_4(to, from, ret);286- else if (n == 5)287- __asm_copy_to_user_5(to, from, ret);288- else if (n == 6)289- __asm_copy_to_user_6(to, from, ret);290- else if (n == 7)291- __asm_copy_to_user_7(to, from, ret);292- else if (n == 8)293- __asm_copy_to_user_8(to, from, ret);294- else if (n == 9)295- __asm_copy_to_user_9(to, from, ret);296- else if (n == 10)297- __asm_copy_to_user_10(to, from, ret);298- else if (n == 11)299- __asm_copy_to_user_11(to, from, ret);300- else if (n == 12)301- __asm_copy_to_user_12(to, from, ret);302- else if (n == 13)303- __asm_copy_to_user_13(to, from, ret);304- else if (n == 14)305- __asm_copy_to_user_14(to, from, ret);306- else if (n == 15)307- __asm_copy_to_user_15(to, from, ret);308- else if (n == 16)309- __asm_copy_to_user_16(to, from, ret);310- else if (n == 20)311- __asm_copy_to_user_20(to, from, ret);312- else if (n == 24)313- __asm_copy_to_user_24(to, from, ret);314- else315- ret = __generic_copy_to_user(to, from, n);316-317- return ret;318-}319-320-/* No switch, please. */321-322-static inline unsigned long323-__constant_clear_user(void __user *to, unsigned long n)324-{325- unsigned long ret = 0;326- if (n == 0)327- ;328- else if (n == 1)329- __asm_clear_1(to, ret);330- else if (n == 2)331- __asm_clear_2(to, ret);332- else if (n == 3)333- __asm_clear_3(to, ret);334- else if (n == 4)335- __asm_clear_4(to, ret);336- else if (n == 8)337- __asm_clear_8(to, ret);338- else if (n == 12)339- __asm_clear_12(to, ret);340- else if (n == 16)341- __asm_clear_16(to, ret);342- else if (n == 20)343- __asm_clear_20(to, ret);344- else if (n == 24)345- __asm_clear_24(to, ret);346- else347- ret = __generic_clear_user(to, n);348-349- return ret;350-}351-352-353-#define clear_user(to, n) \354-(__builtin_constant_p(n) ? \355- __constant_clear_user(to, n) : \356- __generic_clear_user(to, n))357-358-#define copy_from_user(to, from, n) \359-(__builtin_constant_p(n) ? \360- __constant_copy_from_user(to, from, n) : \361- __generic_copy_from_user(to, from, n))362-363-#define copy_to_user(to, from, n) \364-(__builtin_constant_p(n) ? 
\365- __constant_copy_to_user(to, from, n) : \366- __generic_copy_to_user(to, from, n))367-368-/* We let the __ versions of copy_from/to_user inline, because they're often369- * used in fast paths and have only a small space overhead.370- */371-372-static inline unsigned long373-__generic_copy_from_user_nocheck(void *to, const void __user *from,374- unsigned long n)375-{376- return __copy_user_zeroing(to,from,n);377-}378-379-static inline unsigned long380-__generic_copy_to_user_nocheck(void __user *to, const void *from,381- unsigned long n)382-{383- return __copy_user(to,from,n);384-}385-386-static inline unsigned long387-__generic_clear_user_nocheck(void __user *to, unsigned long n)388-{389- return __do_clear_user(to,n);390-}391-392-/* without checking */393-394-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))395-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))396-#define __copy_to_user_inatomic __copy_to_user397-#define __copy_from_user_inatomic __copy_from_user398-#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))399-400-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)401-402-#endif /* __ASSEMBLY__ */403-404-#endif /* _CRIS_UACCESS_H */
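The exception-table contract described in this header (insn/fixup pairs, no registers modified) reduces to a small fixup step in the fault handler; a simplified hypothetical sketch, where regs->irp as the saved CRISv10 program counter is an assumption of this sketch:

/* Simplified fixup, per the exception-table comment above: if the
 * faulting instruction has an entry, resume at its fixup address. */
static int demo_fixup_exception(struct pt_regs *regs,
				const struct exception_table_entry *e)
{
	if (regs->irp != e->insn)
		return 0;	/* not the recorded instruction */
	regs->irp = e->fixup;	/* continue in the fixup stub */
	return 1;
}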
···1-#ifndef __ASM_CRIS_USER_H2-#define __ASM_CRIS_USER_H3-4-#include <linux/types.h>5-#include <asm/ptrace.h>6-#include <asm/page.h>7-#include <asm/arch/user.h>8-9-/*10- * Core file format: The core file is written in such a way that gdb11- * can understand it and provide useful information to the user (under12- * linux we use the `trad-core' bfd). The file contents are as follows:13- *14- * upage: 1 page consisting of a user struct that tells gdb15- * what is present in the file. Directly after this is a16- * copy of the task_struct, which is currently not used by gdb,17- * but it may come in handy at some point. All of the registers18- * are stored as part of the upage. The upage should always be19- * only one page long.20- * data: The data segment follows next. We use current->end_text to21- * current->brk to pick up all of the user variables, plus any memory22- * that may have been sbrk'ed. No attempt is made to determine if a23- * page is demand-zero or if a page is totally unused, we just cover24- * the entire range. All of the addresses are rounded in such a way25- * that an integral number of pages is written.26- * stack: We need the stack information in order to get a meaningful27- * backtrace. We need to write the data from usp to28- * current->start_stack, so we round each of these in order to be able29- * to write an integer number of pages.30- */31-32-struct user {33- struct user_regs_struct regs; /* entire machine state */34- size_t u_tsize; /* text size (pages) */35- size_t u_dsize; /* data size (pages) */36- size_t u_ssize; /* stack size (pages) */37- unsigned long start_code; /* text starting address */38- unsigned long start_data; /* data starting address */39- unsigned long start_stack; /* stack starting address */40- long int signal; /* signal causing core dump */41- unsigned long u_ar0; /* help gdb find registers */42- unsigned long magic; /* identifies a core file */43- char u_comm[32]; /* user command name */44-};45-46-#define NBPG PAGE_SIZE47-#define UPAGES 148-#define HOST_TEXT_START_ADDR (u.start_code)49-#define HOST_DATA_START_ADDR (u.start_data)50-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)51-52-#endif /* __ASM_CRIS_USER_H */