···6565#ifdef CONFIG_ETRAX_AXISFLASHMAP66666767#define ASSEMBLER_MACROS_ONLY6868-#include <asm/arch/sv_addr_ag.h>6868+#include <arch/sv_addr_ag.h>69697070 ;; The partitiontable is looked for at the first sector after the boot7171 ;; sector. Sector size is 65536 bytes in all flashes we use.
···2626#include <asm/irq.h>2727#include <asm/dma.h>2828#include <asm/io.h>2929-#include <asm/arch/svinto.h>2929+#include <arch/svinto.h>3030#include <asm/uaccess.h>3131#include <asm/system.h>3232#include <asm/sync_serial.h>3333-#include <asm/arch/io_interface_mux.h>3333+#include <arch/io_interface_mux.h>34343535/* The receiver is a bit tricky because of the continuous stream of data.*/3636/* */
+1-1
arch/cris/arch-v10/kernel/crisksyms.c
···11#include <linux/module.h>22#include <asm/io.h>33-#include <asm/arch/svinto.h>33+#include <arch/svinto.h>4455/* Export shadow registers for the CPU I/O pins */66EXPORT_SYMBOL(genconfig_shadow);
···1010#define ASSEMBLER_MACROS_ONLY1111/* The IO_* macros use the ## token concatenation operator, so1212 -traditional must not be used when assembling this file. */1313-#include <asm/arch/sv_addr_ag.h>1313+#include <arch/sv_addr_ag.h>14141515#define CRAMFS_MAGIC 0x28cd3d451616#define RAM_INIT_MAGIC 0x56902387
···11#include <linux/module.h>22#include <asm/io.h>33-#include <asm/arch/cache.h>44-#include <asm/arch/hwregs/dma.h>33+#include <arch/cache.h>44+#include <arch/hwregs/dma.h>5566/* This file is used to workaround a cache bug, Guinness TR 106. */77
···1111 * -traditional must not be used when assembling this file.1212 */1313#include <hwregs/reg_rdwr.h>1414-#include <asm/arch/memmap.h>1414+#include <arch/memmap.h>1515#include <hwregs/intr_vect.h>1616#include <hwregs/asm/mmu_defs_asm.h>1717#include <hwregs/asm/reg_map_asm.h>1818-#include <asm/arch/mach/startup.inc>1818+#include <mach/startup.inc>19192020#define CRAMFS_MAGIC 0x28cd3d452121#define JHEAD_MAGIC 0x1FF528A6
···1717#include <asm/pgtable.h>1818#include <asm/system.h>1919#include <asm/processor.h>2020-#include <asm/arch/hwregs/supp_reg.h>2020+#include <arch/hwregs/supp_reg.h>21212222/*2323 * Determines which bits in CCS the user has access to.
···2222##2323##=============================================================================24242525-#include <asm/arch/hwregs/asm/reg_map_asm.h>2626-#include <asm/arch/hwregs/asm/gio_defs_asm.h>2727-#include <asm/arch/hwregs/asm/pinmux_defs_asm.h>2828-#include <asm/arch/hwregs/asm/bif_core_defs_asm.h>2929-#include <asm/arch/hwregs/asm/config_defs_asm.h>2525+#include <arch/hwregs/asm/reg_map_asm.h>2626+#include <arch/hwregs/asm/gio_defs_asm.h>2727+#include <arch/hwregs/asm/pinmux_defs_asm.h>2828+#include <arch/hwregs/asm/bif_core_defs_asm.h>2929+#include <arch/hwregs/asm/config_defs_asm.h>30303131;; There are 8-bit NAND flashes and 16-bit NAND flashes.3232;; We need to treat them slightly different.
···11+/*22+ * Interrupt handling assembler and defines for Linux/CRISv1033+ */44+55+#ifndef _ASM_ARCH_IRQ_H66+#define _ASM_ARCH_IRQ_H77+88+#include <arch/sv_addr_ag.h>99+1010+#define NR_IRQS 321111+1212+/* The first vector number used for IRQs in v10 is really 0x20 */1313+/* but all the code and constants are offseted to make 0 the first */1414+#define FIRST_IRQ 01515+1616+#define SOME_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, some) /* 0 ? */1717+#define NMI_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, nmi) /* 1 */1818+#define TIMER0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer0) /* 2 */1919+#define TIMER1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer1) /* 3 */2020+/* mio, ata, par0, scsi0 on 4 */2121+/* par1, scsi1 on 5 */2222+#define NETWORK_STATUS_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, network) /* 6 */2323+2424+#define SERIAL_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, serial) /* 8 */2525+#define PA_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, pa) /* 11 */2626+/* extdma0 and extdma1 is at irq 12 and 13 and/or same as dma5 and dma6 ? */2727+#define EXTDMA0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma0)2828+#define EXTDMA1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma1)2929+3030+/* dma0-9 is irq 16..25 */3131+/* 16,17: network */3232+#define DMA0_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma0)3333+#define DMA1_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma1)3434+#define NETWORK_DMA_TX_IRQ_NBR DMA0_TX_IRQ_NBR3535+#define NETWORK_DMA_RX_IRQ_NBR DMA1_RX_IRQ_NBR3636+3737+/* 18,19: dma2 and dma3 shared by par0, scsi0, ser2 and ata */3838+#define DMA2_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma2)3939+#define DMA3_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma3)4040+#define SER2_DMA_TX_IRQ_NBR DMA2_TX_IRQ_NBR4141+#define SER2_DMA_RX_IRQ_NBR DMA3_RX_IRQ_NBR4242+4343+/* 20,21: dma4 and dma5 shared by par1, scsi1, ser3 and extdma0 */4444+#define DMA4_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma4)4545+#define DMA5_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma5)4646+#define SER3_DMA_TX_IRQ_NBR DMA4_TX_IRQ_NBR4747+#define SER3_DMA_RX_IRQ_NBR DMA5_RX_IRQ_NBR4848+4949+/* 22,23: dma6 
and dma7 shared by ser0, extdma1 and mem2mem */5050+#define DMA6_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma6)5151+#define DMA7_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma7)5252+#define SER0_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR5353+#define SER0_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR5454+#define MEM2MEM_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR5555+#define MEM2MEM_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR5656+5757+/* 24,25: dma8 and dma9 shared by ser1 and usb */5858+#define DMA8_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma8)5959+#define DMA9_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma9)6060+#define SER1_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR6161+#define SER1_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR6262+#define USB_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR6363+#define USB_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR6464+6565+/* usb: controller at irq 31 + uses DMA8 and DMA9 */6666+#define USB_HC_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, usb)6767+6868+/* our fine, global, etrax irq vector! the pointer lives in the head.S file. */6969+7070+typedef void (*irqvectptr)(void);7171+7272+struct etrax_interrupt_vector {7373+ irqvectptr v[256];7474+};7575+7676+extern struct etrax_interrupt_vector *etrax_irv;7777+void set_int_vector(int n, irqvectptr addr);7878+void set_break_vector(int n, irqvectptr addr);7979+8080+#define __STR(x) #x8181+#define STR(x) __STR(x)8282+8383+/* SAVE_ALL saves registers so they match pt_regs */8484+8585+#define SAVE_ALL \8686+ "move $irp,[$sp=$sp-16]\n\t" /* push instruction pointer and fake SBFS struct */ \8787+ "push $srp\n\t" /* push subroutine return pointer */ \8888+ "push $dccr\n\t" /* push condition codes */ \8989+ "push $mof\n\t" /* push multiply overflow reg */ \9090+ "di\n\t" /* need to disable irq's at this point */\9191+ "subq 14*4,$sp\n\t" /* make room for r0-r13 */ \9292+ "movem $r13,[$sp]\n\t" /* push the r0-r13 registers */ \9393+ "push $r10\n\t" /* push orig_r10 */ \9494+ "clear.d [$sp=$sp-4]\n\t" /* frametype - this is a normal stackframe */9595+9696+ /* BLOCK_IRQ and UNBLOCK_IRQ do the same as mask_irq and unmask_irq 
*/9797+9898+#define BLOCK_IRQ(mask,nr) \9999+ "move.d " #mask ",$r0\n\t" \100100+ "move.d $r0,[0xb00000d8]\n\t" 101101+102102+#define UNBLOCK_IRQ(mask) \103103+ "move.d " #mask ",$r0\n\t" \104104+ "move.d $r0,[0xb00000dc]\n\t" 105105+106106+#define IRQ_NAME2(nr) nr##_interrupt(void)107107+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)108108+#define sIRQ_NAME(nr) IRQ_NAME2(sIRQ##nr)109109+#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)110110+111111+ /* the asm IRQ handler makes sure the causing IRQ is blocked, then it calls112112+ * do_IRQ (with irq disabled still). after that it unblocks and jumps to113113+ * ret_from_intr (entry.S)114114+ *115115+ * The reason the IRQ is blocked is to allow an sti() before the handler which116116+ * will acknowledge the interrupt is run.117117+ */118118+119119+#define BUILD_IRQ(nr,mask) \120120+void IRQ_NAME(nr); \121121+__asm__ ( \122122+ ".text\n\t" \123123+ "IRQ" #nr "_interrupt:\n\t" \124124+ SAVE_ALL \125125+ BLOCK_IRQ(mask,nr) /* this must be done to prevent irq loops when we ei later */ \126126+ "moveq "#nr",$r10\n\t" \127127+ "move.d $sp,$r11\n\t" \128128+ "jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \129129+ UNBLOCK_IRQ(mask) \130130+ "moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \131131+ "jump ret_from_intr\n\t");132132+133133+/* This is subtle. The timer interrupt is crucial and it should not be disabled for 134134+ * too long. However, if it had been a normal interrupt as per BUILD_IRQ, it would135135+ * have been BLOCK'ed, and then softirq's are run before we return here to UNBLOCK.136136+ * If the softirq's take too much time to run, the timer irq won't run and the 137137+ * watchdog will kill us.138138+ *139139+ * Furthermore, if a lot of other irq's occur before we return here, the multiple_irq140140+ * handler is run and it prioritizes the timer interrupt. 
However if we had BLOCK'ed141141+ * it here, we would not get the multiple_irq at all.142142+ *143143+ * The non-blocking here is based on the knowledge that the timer interrupt is 144144+ * registred as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not145145+ * be an sti() before the timer irq handler is run to acknowledge the interrupt.146146+ */147147+148148+#define BUILD_TIMER_IRQ(nr,mask) \149149+void IRQ_NAME(nr); \150150+__asm__ ( \151151+ ".text\n\t" \152152+ "IRQ" #nr "_interrupt:\n\t" \153153+ SAVE_ALL \154154+ "moveq "#nr",$r10\n\t" \155155+ "move.d $sp,$r11\n\t" \156156+ "jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \157157+ "moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \158158+ "jump ret_from_intr\n\t");159159+160160+#endif
+19
arch/cris/include/arch-v32/arch/cache.h
···11+#ifndef _ASM_CRIS_ARCH_CACHE_H22+#define _ASM_CRIS_ARCH_CACHE_H33+44+#include <arch/hwregs/dma.h>55+66+/* A cache-line is 32 bytes. */77+#define L1_CACHE_BYTES 3288+#define L1_CACHE_SHIFT 599+1010+void flush_dma_list(dma_descr_data *descr);1111+void flush_dma_descr(dma_descr_data *descr, int flush_buf);1212+1313+#define flush_dma_context(c) \1414+ flush_dma_list(phys_to_virt((c)->saved_data));1515+1616+void cris_flush_cache_range(void *buf, unsigned long len);1717+void cris_flush_cache(void);1818+1919+#endif /* _ASM_CRIS_ARCH_CACHE_H */
+272
arch/cris/include/arch-v32/arch/cryptocop.h
···11+/*22+ * The device /dev/cryptocop is accessible using this driver using33+ * CRYPTOCOP_MAJOR (254) and minor number 0.44+ */55+66+#ifndef CRYPTOCOP_H77+#define CRYPTOCOP_H88+99+#include <linux/uio.h>1010+1111+1212+#define CRYPTOCOP_SESSION_ID_NONE (0)1313+1414+typedef unsigned long long int cryptocop_session_id;1515+1616+/* cryptocop ioctls */1717+#define ETRAXCRYPTOCOP_IOCTYPE (250)1818+1919+#define CRYPTOCOP_IO_CREATE_SESSION _IOWR(ETRAXCRYPTOCOP_IOCTYPE, 1, struct strcop_session_op)2020+#define CRYPTOCOP_IO_CLOSE_SESSION _IOW(ETRAXCRYPTOCOP_IOCTYPE, 2, struct strcop_session_op)2121+#define CRYPTOCOP_IO_PROCESS_OP _IOWR(ETRAXCRYPTOCOP_IOCTYPE, 3, struct strcop_crypto_op)2222+#define CRYPTOCOP_IO_MAXNR (3)2323+2424+typedef enum {2525+ cryptocop_cipher_des = 0,2626+ cryptocop_cipher_3des = 1,2727+ cryptocop_cipher_aes = 2,2828+ cryptocop_cipher_m2m = 3, /* mem2mem is essentially a NULL cipher with blocklength=1 */2929+ cryptocop_cipher_none3030+} cryptocop_cipher_type;3131+3232+typedef enum {3333+ cryptocop_digest_sha1 = 0,3434+ cryptocop_digest_md5 = 1,3535+ cryptocop_digest_none3636+} cryptocop_digest_type;3737+3838+typedef enum {3939+ cryptocop_csum_le = 0,4040+ cryptocop_csum_be = 1,4141+ cryptocop_csum_none4242+} cryptocop_csum_type;4343+4444+typedef enum {4545+ cryptocop_cipher_mode_ecb = 0,4646+ cryptocop_cipher_mode_cbc,4747+ cryptocop_cipher_mode_none4848+} cryptocop_cipher_mode;4949+5050+typedef enum {5151+ cryptocop_3des_eee = 0,5252+ cryptocop_3des_eed = 1,5353+ cryptocop_3des_ede = 2,5454+ cryptocop_3des_edd = 3,5555+ cryptocop_3des_dee = 4,5656+ cryptocop_3des_ded = 5,5757+ cryptocop_3des_dde = 6,5858+ cryptocop_3des_ddd = 75959+} cryptocop_3des_mode;6060+6161+/* Usermode accessible (ioctl) operations. 
*/6262+struct strcop_session_op{6363+ cryptocop_session_id ses_id;6464+6565+ cryptocop_cipher_type cipher; /* AES, DES, 3DES, m2m, none */6666+6767+ cryptocop_cipher_mode cmode; /* ECB, CBC, none */6868+ cryptocop_3des_mode des3_mode;6969+7070+ cryptocop_digest_type digest; /* MD5, SHA1, none */7171+7272+ cryptocop_csum_type csum; /* BE, LE, none */7373+7474+ unsigned char *key;7575+ size_t keylen;7676+};7777+7878+#define CRYPTOCOP_CSUM_LENGTH (2)7979+#define CRYPTOCOP_MAX_DIGEST_LENGTH (20) /* SHA-1 20, MD5 16 */8080+#define CRYPTOCOP_MAX_IV_LENGTH (16) /* (3)DES==8, AES == 16 */8181+#define CRYPTOCOP_MAX_KEY_LENGTH (32)8282+8383+struct strcop_crypto_op{8484+ cryptocop_session_id ses_id;8585+8686+ /* Indata. */8787+ unsigned char *indata;8888+ size_t inlen; /* Total indata length. */8989+9090+ /* Cipher configuration. */9191+ unsigned char do_cipher:1;9292+ unsigned char decrypt:1; /* 1 == decrypt, 0 == encrypt */9393+ unsigned char cipher_explicit:1;9494+ size_t cipher_start;9595+ size_t cipher_len;9696+ /* cipher_iv is used if do_cipher and cipher_explicit and the cipher9797+ mode is CBC. The length is controlled by the type of cipher,9898+ e.g. DES/3DES 8 octets and AES 16 octets. */9999+ unsigned char cipher_iv[CRYPTOCOP_MAX_IV_LENGTH];100100+ /* Outdata. */101101+ unsigned char *cipher_outdata;102102+ size_t cipher_outlen;103103+104104+ /* digest configuration. */105105+ unsigned char do_digest:1;106106+ size_t digest_start;107107+ size_t digest_len;108108+ /* Outdata. The actual length is determined by the type of the digest. */109109+ unsigned char digest[CRYPTOCOP_MAX_DIGEST_LENGTH];110110+111111+ /* Checksum configuration. */112112+ unsigned char do_csum:1;113113+ size_t csum_start;114114+ size_t csum_len;115115+ /* Outdata. */116116+ unsigned char csum[CRYPTOCOP_CSUM_LENGTH];117117+};118118+119119+120120+121121+#ifdef __KERNEL__122122+123123+/********** The API to use from inside the kernel. 
************/124124+125125+#include <arch/hwregs/dma.h>126126+127127+typedef enum {128128+ cryptocop_alg_csum = 0,129129+ cryptocop_alg_mem2mem,130130+ cryptocop_alg_md5,131131+ cryptocop_alg_sha1,132132+ cryptocop_alg_des,133133+ cryptocop_alg_3des,134134+ cryptocop_alg_aes,135135+ cryptocop_no_alg,136136+} cryptocop_algorithm;137137+138138+typedef u8 cryptocop_tfrm_id;139139+140140+141141+struct cryptocop_operation;142142+143143+typedef void (cryptocop_callback)(struct cryptocop_operation*, void*);144144+145145+struct cryptocop_transform_init {146146+ cryptocop_algorithm alg;147147+ /* Keydata for ciphers. */148148+ unsigned char key[CRYPTOCOP_MAX_KEY_LENGTH];149149+ unsigned int keylen;150150+ cryptocop_cipher_mode cipher_mode;151151+ cryptocop_3des_mode tdes_mode;152152+ cryptocop_csum_type csum_mode; /* cryptocop_csum_none is not allowed when alg==cryptocop_alg_csum */153153+154154+ cryptocop_tfrm_id tid; /* Locally unique in session; assigned by user, checked by driver. */155155+ struct cryptocop_transform_init *next;156156+};157157+158158+159159+typedef enum {160160+ cryptocop_source_dma = 0,161161+ cryptocop_source_des,162162+ cryptocop_source_3des,163163+ cryptocop_source_aes,164164+ cryptocop_source_md5,165165+ cryptocop_source_sha1,166166+ cryptocop_source_csum,167167+ cryptocop_source_none,168168+} cryptocop_source;169169+170170+171171+struct cryptocop_desc_cfg {172172+ cryptocop_tfrm_id tid;173173+ cryptocop_source src;174174+ unsigned int last:1; /* Last use of this transform in the operation. Will push outdata when encountered. 
*/175175+ struct cryptocop_desc_cfg *next;176176+};177177+178178+struct cryptocop_desc {179179+ size_t length;180180+ struct cryptocop_desc_cfg *cfg;181181+ struct cryptocop_desc *next;182182+};183183+184184+185185+/* Flags for cryptocop_tfrm_cfg */186186+#define CRYPTOCOP_NO_FLAG (0x00)187187+#define CRYPTOCOP_ENCRYPT (0x01)188188+#define CRYPTOCOP_DECRYPT (0x02)189189+#define CRYPTOCOP_EXPLICIT_IV (0x04)190190+191191+struct cryptocop_tfrm_cfg {192192+ cryptocop_tfrm_id tid;193193+194194+ unsigned int flags; /* DECRYPT, ENCRYPT, EXPLICIT_IV */195195+196196+ /* CBC initialisation vector for cihers. */197197+ u8 iv[CRYPTOCOP_MAX_IV_LENGTH];198198+199199+ /* The position in output where to write the transform output. The order200200+ in which the driver writes the output is unspecified, hence if several201201+ transforms write on the same positions in the output the result is202202+ unspecified. */203203+ size_t inject_ix;204204+205205+ struct cryptocop_tfrm_cfg *next;206206+};207207+208208+209209+210210+struct cryptocop_dma_list_operation{211211+ /* The consumer can provide DMA lists to send to the co-processor. 'use_dmalists' in212212+ struct cryptocop_operation must be set for the driver to use them. outlist,213213+ out_data_buf, inlist and in_data_buf must all be physical addresses since they will214214+ be loaded to DMA . */215215+ dma_descr_data *outlist; /* Out from memory to the co-processor. */216216+ char *out_data_buf;217217+ dma_descr_data *inlist; /* In from the co-processor to memory. */218218+ char *in_data_buf;219219+220220+ cryptocop_3des_mode tdes_mode;221221+ cryptocop_csum_type csum_mode;222222+};223223+224224+225225+struct cryptocop_tfrm_operation{226226+ /* Operation configuration, if not 'use_dmalists' is set. */227227+ struct cryptocop_tfrm_cfg *tfrm_cfg;228228+ struct cryptocop_desc *desc;229229+230230+ struct iovec *indata;231231+ size_t incount;232232+ size_t inlen; /* Total inlength. 
*/233233+234234+ struct iovec *outdata;235235+ size_t outcount;236236+ size_t outlen; /* Total outlength. */237237+};238238+239239+240240+struct cryptocop_operation {241241+ cryptocop_callback *cb;242242+ void *cb_data;243243+244244+ cryptocop_session_id sid;245245+246246+ /* The status of the operation when returned to consumer. */247247+ int operation_status; /* 0, -EAGAIN */248248+249249+ /* Flags */250250+ unsigned int use_dmalists:1; /* Use outlist and inlist instead of the desc/tfrm_cfg configuration. */251251+ unsigned int in_interrupt:1; /* Set if inserting job from interrupt context. */252252+ unsigned int fast_callback:1; /* Set if fast callback wanted, i.e. from interrupt context. */253253+254254+ union{255255+ struct cryptocop_dma_list_operation list_op;256256+ struct cryptocop_tfrm_operation tfrm_op;257257+ };258258+};259259+260260+261261+int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag);262262+int cryptocop_free_session(cryptocop_session_id sid);263263+264264+int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation);265265+266266+int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation);267267+268268+int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation);269269+270270+#endif /* __KERNEL__ */271271+272272+#endif /* CRYPTOCOP_H */
···11+/* $Id: atomic.h,v 1.3 2001/07/25 16:15:19 bjornw Exp $ */22+33+#ifndef __ASM_CRIS_ATOMIC__44+#define __ASM_CRIS_ATOMIC__55+66+#include <linux/compiler.h>77+88+#include <asm/system.h>99+#include <arch/atomic.h>1010+1111+/*1212+ * Atomic operations that C can't guarantee us. Useful for1313+ * resource counting etc..1414+ */1515+1616+typedef struct { volatile int counter; } atomic_t;1717+1818+#define ATOMIC_INIT(i) { (i) }1919+2020+#define atomic_read(v) ((v)->counter)2121+#define atomic_set(v,i) (((v)->counter) = (i))2222+2323+/* These should be written in asm but we do it in C for now. */2424+2525+static inline void atomic_add(int i, volatile atomic_t *v)2626+{2727+ unsigned long flags;2828+ cris_atomic_save(v, flags);2929+ v->counter += i;3030+ cris_atomic_restore(v, flags);3131+}3232+3333+static inline void atomic_sub(int i, volatile atomic_t *v)3434+{3535+ unsigned long flags;3636+ cris_atomic_save(v, flags);3737+ v->counter -= i;3838+ cris_atomic_restore(v, flags);3939+}4040+4141+static inline int atomic_add_return(int i, volatile atomic_t *v)4242+{4343+ unsigned long flags;4444+ int retval;4545+ cris_atomic_save(v, flags);4646+ retval = (v->counter += i);4747+ cris_atomic_restore(v, flags);4848+ return retval;4949+}5050+5151+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)5252+5353+static inline int atomic_sub_return(int i, volatile atomic_t *v)5454+{5555+ unsigned long flags;5656+ int retval;5757+ cris_atomic_save(v, flags);5858+ retval = (v->counter -= i);5959+ cris_atomic_restore(v, flags);6060+ return retval;6161+}6262+6363+static inline int atomic_sub_and_test(int i, volatile atomic_t *v)6464+{6565+ int retval;6666+ unsigned long flags;6767+ cris_atomic_save(v, flags);6868+ retval = (v->counter -= i) == 0;6969+ cris_atomic_restore(v, flags);7070+ return retval;7171+}7272+7373+static inline void atomic_inc(volatile atomic_t *v)7474+{7575+ unsigned long flags;7676+ cris_atomic_save(v, flags);7777+ (v->counter)++;7878+ 
cris_atomic_restore(v, flags);7979+}8080+8181+static inline void atomic_dec(volatile atomic_t *v)8282+{8383+ unsigned long flags;8484+ cris_atomic_save(v, flags);8585+ (v->counter)--;8686+ cris_atomic_restore(v, flags);8787+}8888+8989+static inline int atomic_inc_return(volatile atomic_t *v)9090+{9191+ unsigned long flags;9292+ int retval;9393+ cris_atomic_save(v, flags);9494+ retval = ++(v->counter);9595+ cris_atomic_restore(v, flags);9696+ return retval;9797+}9898+9999+static inline int atomic_dec_return(volatile atomic_t *v)100100+{101101+ unsigned long flags;102102+ int retval;103103+ cris_atomic_save(v, flags);104104+ retval = --(v->counter);105105+ cris_atomic_restore(v, flags);106106+ return retval;107107+}108108+static inline int atomic_dec_and_test(volatile atomic_t *v)109109+{110110+ int retval;111111+ unsigned long flags;112112+ cris_atomic_save(v, flags);113113+ retval = --(v->counter) == 0;114114+ cris_atomic_restore(v, flags);115115+ return retval;116116+}117117+118118+static inline int atomic_inc_and_test(volatile atomic_t *v)119119+{120120+ int retval;121121+ unsigned long flags;122122+ cris_atomic_save(v, flags);123123+ retval = ++(v->counter) == 0;124124+ cris_atomic_restore(v, flags);125125+ return retval;126126+}127127+128128+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)129129+{130130+ int ret;131131+ unsigned long flags;132132+133133+ cris_atomic_save(v, flags);134134+ ret = v->counter;135135+ if (likely(ret == old))136136+ v->counter = new;137137+ cris_atomic_restore(v, flags);138138+ return ret;139139+}140140+141141+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))142142+143143+static inline int atomic_add_unless(atomic_t *v, int a, int u)144144+{145145+ int ret;146146+ unsigned long flags;147147+148148+ cris_atomic_save(v, flags);149149+ ret = v->counter;150150+ if (ret != u)151151+ v->counter += a;152152+ cris_atomic_restore(v, flags);153153+ return ret != u;154154+}155155+#define atomic_inc_not_zero(v) 
atomic_add_unless((v), 1, 0)156156+157157+/* Atomic operations are already serializing */158158+#define smp_mb__before_atomic_dec() barrier()159159+#define smp_mb__after_atomic_dec() barrier()160160+#define smp_mb__before_atomic_inc() barrier()161161+#define smp_mb__after_atomic_inc() barrier()162162+163163+#include <asm-generic/atomic.h>164164+#endif
+166
arch/cris/include/asm/bitops.h
···11+/* asm/bitops.h for Linux/CRIS22+ *33+ * TODO: asm versions if speed is needed44+ *55+ * All bit operations return 0 if the bit was cleared before the66+ * operation and != 0 if it was not.77+ *88+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).99+ */1010+1111+#ifndef _CRIS_BITOPS_H1212+#define _CRIS_BITOPS_H1313+1414+/* Currently this is unsuitable for consumption outside the kernel. */1515+#ifdef __KERNEL__ 1616+1717+#ifndef _LINUX_BITOPS_H1818+#error only <linux/bitops.h> can be included directly1919+#endif2020+2121+#include <arch/bitops.h>2222+#include <asm/system.h>2323+#include <asm/atomic.h>2424+#include <linux/compiler.h>2525+2626+/*2727+ * set_bit - Atomically set a bit in memory2828+ * @nr: the bit to set2929+ * @addr: the address to start counting from3030+ *3131+ * This function is atomic and may not be reordered. See __set_bit()3232+ * if you do not require the atomic guarantees.3333+ * Note that @nr may be almost arbitrarily large; this function is not3434+ * restricted to acting on a single-word quantity.3535+ */3636+3737+#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)3838+3939+/*4040+ * clear_bit - Clears a bit in memory4141+ * @nr: Bit to clear4242+ * @addr: Address to start counting from4343+ *4444+ * clear_bit() is atomic and may not be reordered. 
However, it does4545+ * not contain a memory barrier, so if it is used for locking purposes,4646+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()4747+ * in order to ensure changes are visible on other processors.4848+ */4949+5050+#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)5151+5252+/*5353+ * change_bit - Toggle a bit in memory5454+ * @nr: Bit to change5555+ * @addr: Address to start counting from5656+ *5757+ * change_bit() is atomic and may not be reordered.5858+ * Note that @nr may be almost arbitrarily large; this function is not5959+ * restricted to acting on a single-word quantity.6060+ */6161+6262+#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)6363+6464+/**6565+ * test_and_set_bit - Set a bit and return its old value6666+ * @nr: Bit to set6767+ * @addr: Address to count from6868+ *6969+ * This operation is atomic and cannot be reordered. 7070+ * It also implies a memory barrier.7171+ */7272+7373+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)7474+{7575+ unsigned int mask, retval;7676+ unsigned long flags;7777+ unsigned int *adr = (unsigned int *)addr;7878+7979+ adr += nr >> 5;8080+ mask = 1 << (nr & 0x1f);8181+ cris_atomic_save(addr, flags);8282+ retval = (mask & *adr) != 0;8383+ *adr |= mask;8484+ cris_atomic_restore(addr, flags);8585+ return retval;8686+}8787+8888+/*8989+ * clear_bit() doesn't provide any barrier for the compiler.9090+ */9191+#define smp_mb__before_clear_bit() barrier()9292+#define smp_mb__after_clear_bit() barrier()9393+9494+/**9595+ * test_and_clear_bit - Clear a bit and return its old value9696+ * @nr: Bit to clear9797+ * @addr: Address to count from9898+ *9999+ * This operation is atomic and cannot be reordered. 
100100+ * It also implies a memory barrier.101101+ */102102+103103+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)104104+{105105+ unsigned int mask, retval;106106+ unsigned long flags;107107+ unsigned int *adr = (unsigned int *)addr;108108+109109+ adr += nr >> 5;110110+ mask = 1 << (nr & 0x1f);111111+ cris_atomic_save(addr, flags);112112+ retval = (mask & *adr) != 0;113113+ *adr &= ~mask;114114+ cris_atomic_restore(addr, flags);115115+ return retval;116116+}117117+118118+/**119119+ * test_and_change_bit - Change a bit and return its old value120120+ * @nr: Bit to change121121+ * @addr: Address to count from122122+ *123123+ * This operation is atomic and cannot be reordered. 124124+ * It also implies a memory barrier.125125+ */126126+127127+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)128128+{129129+ unsigned int mask, retval;130130+ unsigned long flags;131131+ unsigned int *adr = (unsigned int *)addr;132132+ adr += nr >> 5;133133+ mask = 1 << (nr & 0x1f);134134+ cris_atomic_save(addr, flags);135135+ retval = (mask & *adr) != 0;136136+ *adr ^= mask;137137+ cris_atomic_restore(addr, flags);138138+ return retval;139139+}140140+141141+#include <asm-generic/bitops/non-atomic.h>142142+143143+/*144144+ * Since we define it "external", it collides with the built-in145145+ * definition, which doesn't have the same semantics. 
We don't want to146146+ * use -fno-builtin, so just hide the name ffs.147147+ */148148+#define ffs kernel_ffs149149+150150+#include <asm-generic/bitops/fls.h>151151+#include <asm-generic/bitops/fls64.h>152152+#include <asm-generic/bitops/hweight.h>153153+#include <asm-generic/bitops/find.h>154154+#include <asm-generic/bitops/lock.h>155155+156156+#include <asm-generic/bitops/ext2-non-atomic.h>157157+158158+#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)159159+#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)160160+161161+#include <asm-generic/bitops/minix.h>162162+#include <asm-generic/bitops/sched.h>163163+164164+#endif /* __KERNEL__ */165165+166166+#endif /* _CRIS_BITOPS_H */
···11+#ifndef _CRIS_BYTEORDER_H22+#define _CRIS_BYTEORDER_H33+44+#ifdef __GNUC__55+66+#ifdef __KERNEL__77+#include <arch/byteorder.h>88+99+/* defines are necessary because the other files detect the presence1010+ * of a defined __arch_swab32, not an inline1111+ */1212+#define __arch__swab32(x) ___arch__swab32(x)1313+#define __arch__swab16(x) ___arch__swab16(x)1414+#endif /* __KERNEL__ */1515+1616+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)1717+# define __BYTEORDER_HAS_U64__1818+# define __SWAB_64_THRU_32__1919+#endif2020+2121+#endif /* __GNUC__ */2222+2323+#include <linux/byteorder/little_endian.h>2424+2525+#endif2626+2727+
···11+/* TODO: csum_tcpudp_magic could be speeded up, and csum_fold as well */22+33+#ifndef _CRIS_CHECKSUM_H44+#define _CRIS_CHECKSUM_H55+66+#include <arch/checksum.h>77+88+/*99+ * computes the checksum of a memory block at buff, length len,1010+ * and adds in "sum" (32-bit)1111+ *1212+ * returns a 32-bit number suitable for feeding into itself1313+ * or csum_tcpudp_magic1414+ *1515+ * this function must be called with even lengths, except1616+ * for the last fragment, which may be odd1717+ *1818+ * it's best to have buff aligned on a 32-bit boundary1919+ */2020+__wsum csum_partial(const void *buff, int len, __wsum sum);2121+2222+/*2323+ * the same as csum_partial, but copies from src while it2424+ * checksums2525+ *2626+ * here even more important to align src and dst on a 32-bit (or even2727+ * better 64-bit) boundary2828+ */2929+3030+__wsum csum_partial_copy_nocheck(const void *src, void *dst,3131+ int len, __wsum sum);3232+3333+/*3434+ * Fold a partial checksum into a word3535+ */3636+3737+static inline __sum16 csum_fold(__wsum csum)3838+{3939+ u32 sum = (__force u32)csum;4040+ sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */4141+ sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */4242+ return (__force __sum16)~sum;4343+}4444+4545+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,4646+ int len, __wsum sum,4747+ int *errptr);4848+4949+/*5050+ * This is a version of ip_compute_csum() optimized for IP headers,5151+ * which always checksum on 4 octet boundaries.5252+ *5353+ */5454+5555+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)5656+{5757+ return csum_fold(csum_partial(iph, ihl * 4, 0));5858+}5959+6060+/*6161+ * computes the checksum of the TCP/UDP pseudo-header6262+ * returns a 16-bit checksum, already complemented6363+ */6464+6565+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,6666+ unsigned short len,6767+ unsigned short proto,6868+ __wsum sum)6969+{7070+ 
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));7171+}7272+7373+/*7474+ * this routine is used for miscellaneous IP-like checksums, mainly7575+ * in icmp.c7676+ */7777+7878+static inline __sum16 ip_compute_csum(const void *buff, int len)7979+{8080+ return csum_fold (csum_partial(buff, len, 0));8181+}8282+8383+#endif
+27
arch/cris/include/asm/delay.h
···11+#ifndef _CRIS_DELAY_H22+#define _CRIS_DELAY_H33+44+/*55+ * Copyright (C) 1998-2002 Axis Communications AB66+ *77+ * Delay routines, using a pre-computed "loops_per_second" value.88+ */99+1010+#include <arch/delay.h>1111+1212+/* Use only for very small delays ( < 1 msec). */1313+1414+extern unsigned long loops_per_usec; /* arch/cris/mm/init.c */1515+1616+/* May be defined by arch/delay.h. */1717+#ifndef udelay1818+static inline void udelay(unsigned long usecs)1919+{2020+ __delay(usecs * loops_per_usec);2121+}2222+#endif2323+2424+#endif /* defined(_CRIS_DELAY_H) */2525+2626+2727+
+21
arch/cris/include/asm/dma.h
···11+/* $Id: dma.h,v 1.2 2001/05/09 12:17:42 johana Exp $ */22+33+#ifndef _ASM_DMA_H44+#define _ASM_DMA_H55+66+#include <arch/dma.h>77+88+/* it's useless on the Etrax, but unfortunately needed by the new99+ bootmem allocator (but this should do it for this) */1010+1111+#define MAX_DMA_ADDRESS PAGE_OFFSET1212+1313+/* From PCI */1414+1515+#ifdef CONFIG_PCI1616+extern int isa_dma_bridge_buggy;1717+#else1818+#define isa_dma_bridge_buggy (0)1919+#endif2020+2121+#endif /* _ASM_DMA_H */
+93
arch/cris/include/asm/elf.h
···11+#ifndef __ASMCRIS_ELF_H22+#define __ASMCRIS_ELF_H33+44+/*55+ * ELF register definitions..66+ */77+88+#include <asm/user.h>99+1010+#define R_CRIS_NONE 01111+#define R_CRIS_8 11212+#define R_CRIS_16 21313+#define R_CRIS_32 31414+#define R_CRIS_8_PCREL 41515+#define R_CRIS_16_PCREL 51616+#define R_CRIS_32_PCREL 61717+#define R_CRIS_GNU_VTINHERIT 71818+#define R_CRIS_GNU_VTENTRY 81919+#define R_CRIS_COPY 92020+#define R_CRIS_GLOB_DAT 102121+#define R_CRIS_JUMP_SLOT 112222+#define R_CRIS_RELATIVE 122323+#define R_CRIS_16_GOT 132424+#define R_CRIS_32_GOT 142525+#define R_CRIS_16_GOTPLT 152626+#define R_CRIS_32_GOTPLT 162727+#define R_CRIS_32_GOTREL 172828+#define R_CRIS_32_PLT_GOTREL 182929+#define R_CRIS_32_PLT_PCREL 193030+3131+typedef unsigned long elf_greg_t;3232+3333+/* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is3434+ thus exposed to user-space. */3535+#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))3636+typedef elf_greg_t elf_gregset_t[ELF_NGREG];3737+3838+/* A placeholder; CRIS does not have any fp regs. */3939+typedef unsigned long elf_fpregset_t;4040+4141+/*4242+ * These are used to set parameters in the core dumps.4343+ */4444+#define ELF_CLASS ELFCLASS324545+#define ELF_DATA ELFDATA2LSB4646+#define ELF_ARCH EM_CRIS4747+4848+#include <arch/elf.h>4949+5050+/* The master for these definitions is {binutils}/include/elf/cris.h: */5151+/* User symbols in this file have a leading underscore. */5252+#define EF_CRIS_UNDERSCORE 0x000000015353+5454+/* This is a mask for different incompatible machine variants. */5555+#define EF_CRIS_VARIANT_MASK 0x0000000e5656+5757+/* Variant 0; may contain v0..10 object. */5858+#define EF_CRIS_VARIANT_ANY_V0_V10 0x000000005959+6060+/* Variant 1; contains v32 object. */6161+#define EF_CRIS_VARIANT_V32 0x000000026262+6363+/* Variant 2; contains object compatible with v32 and v10. 
*/6464+#define EF_CRIS_VARIANT_COMMON_V10_V32 0x000000046565+/* End of excerpt from {binutils}/include/elf/cris.h. */6666+6767+#define USE_ELF_CORE_DUMP6868+6969+#define ELF_EXEC_PAGESIZE 81927070+7171+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical7272+ use of this is to invoke "./ld.so someprog" to test out a new version of7373+ the loader. We need to make sure that it is out of the way of the program7474+ that it will "exec", and that there is sufficient room for the brk. */7575+7676+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)7777+7878+/* This yields a mask that user programs can use to figure out what7979+ instruction set this CPU supports. This could be done in user space,8080+ but it's not easy, and we've already done it here. */8181+8282+#define ELF_HWCAP (0)8383+8484+/* This yields a string that ld.so will use to load implementation8585+ specific libraries for optimization. This is more specific in8686+ intent than poking at uname or /proc/cpuinfo.8787+*/8888+8989+#define ELF_PLATFORM (NULL)9090+9191+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)9292+9393+#endif
+154
arch/cris/include/asm/io.h
···11+#ifndef _ASM_CRIS_IO_H22+#define _ASM_CRIS_IO_H33+44+#include <asm/page.h> /* for __va, __pa */55+#include <arch/io.h>66+#include <linux/kernel.h>77+88+struct cris_io_operations99+{1010+ u32 (*read_mem)(void *addr, int size);1111+ void (*write_mem)(u32 val, int size, void *addr);1212+ u32 (*read_io)(u32 port, void *addr, int size, int count);1313+ void (*write_io)(u32 port, void *addr, int size, int count);1414+};1515+1616+#ifdef CONFIG_PCI1717+extern struct cris_io_operations *cris_iops;1818+#else1919+#define cris_iops ((struct cris_io_operations*)NULL)2020+#endif2121+2222+/*2323+ * Change virtual addresses to physical addresses and vv.2424+ */2525+2626+static inline unsigned long virt_to_phys(volatile void * address)2727+{2828+ return __pa(address);2929+}3030+3131+static inline void * phys_to_virt(unsigned long address)3232+{3333+ return __va(address);3434+}3535+3636+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);3737+extern void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot);3838+3939+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)4040+{4141+ return __ioremap(offset, size, 0);4242+}4343+4444+extern void iounmap(volatile void * __iomem addr);4545+4646+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);4747+4848+/*4949+ * IO bus memory addresses are also 1:1 with the physical address5050+ */5151+#define virt_to_bus virt_to_phys5252+#define bus_to_virt phys_to_virt5353+5454+/*5555+ * readX/writeX() are used to access memory mapped devices. On some5656+ * architectures the memory mapped IO stuff needs to be accessed5757+ * differently. 
On the CRIS architecture, we just read/write the5858+ * memory location directly.5959+ */6060+#ifdef CONFIG_PCI6161+#define PCI_SPACE(x) ((((unsigned)(x)) & 0x10000000) == 0x10000000)6262+#else6363+#define PCI_SPACE(x) 06464+#endif6565+static inline unsigned char readb(const volatile void __iomem *addr)6666+{6767+ if (PCI_SPACE(addr) && cris_iops)6868+ return cris_iops->read_mem((void*)addr, 1);6969+ else7070+ return *(volatile unsigned char __force *) addr;7171+}7272+static inline unsigned short readw(const volatile void __iomem *addr)7373+{7474+ if (PCI_SPACE(addr) && cris_iops)7575+ return cris_iops->read_mem((void*)addr, 2);7676+ else7777+ return *(volatile unsigned short __force *) addr;7878+}7979+static inline unsigned int readl(const volatile void __iomem *addr)8080+{8181+ if (PCI_SPACE(addr) && cris_iops)8282+ return cris_iops->read_mem((void*)addr, 4);8383+ else8484+ return *(volatile unsigned int __force *) addr;8585+}8686+#define readb_relaxed(addr) readb(addr)8787+#define readw_relaxed(addr) readw(addr)8888+#define readl_relaxed(addr) readl(addr)8989+#define __raw_readb readb9090+#define __raw_readw readw9191+#define __raw_readl readl9292+9393+static inline void writeb(unsigned char b, volatile void __iomem *addr)9494+{9595+ if (PCI_SPACE(addr) && cris_iops)9696+ cris_iops->write_mem(b, 1, (void*)addr);9797+ else9898+ *(volatile unsigned char __force *) addr = b;9999+}100100+static inline void writew(unsigned short b, volatile void __iomem *addr)101101+{102102+ if (PCI_SPACE(addr) && cris_iops)103103+ cris_iops->write_mem(b, 2, (void*)addr);104104+ else105105+ *(volatile unsigned short __force *) addr = b;106106+}107107+static inline void writel(unsigned int b, volatile void __iomem *addr)108108+{109109+ if (PCI_SPACE(addr) && cris_iops)110110+ cris_iops->write_mem(b, 4, (void*)addr);111111+ else112112+ *(volatile unsigned int __force *) addr = b;113113+}114114+#define __raw_writeb writeb115115+#define __raw_writew writew116116+#define __raw_writel 
writel117117+118118+#define mmiowb()119119+120120+#define memset_io(a,b,c) memset((void *)(a),(b),(c))121121+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))122122+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))123123+124124+125125+/* I/O port access. Normally there is no I/O space on CRIS but when126126+ * Cardbus/PCI is enabled the request is passed through the bridge.127127+ */128128+129129+#define IO_SPACE_LIMIT 0xffff130130+#define inb(port) (cris_iops ? cris_iops->read_io(port,NULL,1,1) : 0)131131+#define inw(port) (cris_iops ? cris_iops->read_io(port,NULL,2,1) : 0)132132+#define inl(port) (cris_iops ? cris_iops->read_io(port,NULL,4,1) : 0)133133+#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)134134+#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)135135+#define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)136136+#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1)137137+#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1)138138+#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1)139139+#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count)140140+#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count)141141+#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count)142142+143143+/*144144+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem145145+ * access146146+ */147147+#define xlate_dev_mem_ptr(p) __va(p)148148+149149+/*150150+ * Convert a virtual cached pointer to an uncached pointer151151+ */152152+#define xlate_dev_kmem_ptr(p) p153153+154154+#endif
···11+#ifndef _CRIS_PAGE_H22+#define _CRIS_PAGE_H33+44+#include <arch/page.h>55+#include <linux/const.h>66+77+/* PAGE_SHIFT determines the page size */88+#define PAGE_SHIFT 1399+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)1010+#define PAGE_MASK (~(PAGE_SIZE-1))1111+1212+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)1313+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)1414+1515+#define clear_user_page(page, vaddr, pg) clear_page(page)1616+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)1717+1818+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \1919+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)2020+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE2121+2222+/*2323+ * These are used to make use of C type-checking..2424+ */2525+#ifndef __ASSEMBLY__2626+typedef struct { unsigned long pte; } pte_t;2727+typedef struct { unsigned long pgd; } pgd_t;2828+typedef struct { unsigned long pgprot; } pgprot_t;2929+typedef struct page *pgtable_t;3030+#endif3131+3232+#define pte_val(x) ((x).pte)3333+#define pgd_val(x) ((x).pgd)3434+#define pgprot_val(x) ((x).pgprot)3535+3636+#define __pte(x) ((pte_t) { (x) } )3737+#define __pgd(x) ((pgd_t) { (x) } )3838+#define __pgprot(x) ((pgprot_t) { (x) } )3939+4040+/* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */4141+/* for that before indexing into the page table starting at mem_map */4242+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)4343+#define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr)4444+4545+/* to index into the page map. our pages all start at physical addr PAGE_OFFSET so4646+ * we can let the map start there. notice that we subtract PAGE_OFFSET because4747+ * we start our mem_map there - in other ports they map mem_map physically and4848+ * use __pa instead. 
in our system both the physical and virtual address of DRAM4949+ * is too high to let mem_map start at 0, so we do it this way instead (similar5050+ * to arm and m68k I think)5151+ */ 5252+5353+#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT))5454+#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)5555+#define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT)5656+5757+/* convert a page (based on mem_map and forward) to a physical address5858+ * do this by figuring out the virtual address and then use __pa5959+ */6060+6161+#define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)6262+6363+#ifndef __ASSEMBLY__6464+6565+#endif /* __ASSEMBLY__ */6666+6767+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \6868+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)6969+7070+#include <asm-generic/memory_model.h>7171+#include <asm-generic/page.h>7272+7373+#endif /* _CRIS_PAGE_H */7474+
+299
arch/cris/include/asm/pgtable.h
···11+/*22+ * CRIS pgtable.h - macros and functions to manipulate page tables.33+ */44+55+#ifndef _CRIS_PGTABLE_H66+#define _CRIS_PGTABLE_H77+88+#include <asm/page.h>99+#include <asm-generic/pgtable-nopmd.h>1010+1111+#ifndef __ASSEMBLY__1212+#include <linux/sched.h>1313+#include <asm/mmu.h>1414+#endif1515+#include <arch/pgtable.h>1616+1717+/*1818+ * The Linux memory management assumes a three-level page table setup. On1919+ * CRIS, we use that, but "fold" the mid level into the top-level page2020+ * table. Since the MMU TLB is software loaded through an interrupt, it2121+ * supports any page table structure, so we could have used a three-level2222+ * setup, but for the amounts of memory we normally use, a two-level is2323+ * probably more efficient.2424+ *2525+ * This file contains the functions and defines necessary to modify and use2626+ * the CRIS page table tree.2727+ */2828+#ifndef __ASSEMBLY__2929+extern void paging_init(void);3030+#endif3131+3232+/* Certain architectures need to do special things when pte's3333+ * within a page table are directly modified. Thus, the following3434+ * hook is made available.3535+ */3636+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))3737+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)3838+3939+/*4040+ * (pmds are folded into pgds so this doesn't get actually called,4141+ * but the define is needed for a generic inline function.)4242+ */4343+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)4444+#define set_pgu(pudptr, pudval) (*(pudptr) = pudval)4545+4646+/* PGDIR_SHIFT determines the size of the area a second-level page table can4747+ * map. It is equal to the page size times the number of PTE's that fit in4848+ * a PMD page. A PTE is 4-bytes in CRIS. 
Hence the following number.4949+ */5050+5151+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))5252+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)5353+#define PGDIR_MASK (~(PGDIR_SIZE-1))5454+5555+/*5656+ * entries per page directory level: we use a two-level, so5757+ * we don't really have any PMD directory physically.5858+ * pointers are 4 bytes so we can use the page size and 5959+ * divide it by 4 (shift by 2).6060+ */6161+#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))6262+#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))6363+6464+/* calculate how many PGD entries a user-level program can use6565+ * the first mappable virtual address is 06666+ * (TASK_SIZE is the maximum virtual address space)6767+ */6868+6969+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)7070+#define FIRST_USER_ADDRESS 07171+7272+/* zero page used for uninitialized stuff */7373+#ifndef __ASSEMBLY__7474+extern unsigned long empty_zero_page;7575+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))7676+#endif7777+7878+/* number of bits that fit into a memory pointer */7979+#define BITS_PER_PTR (8*sizeof(unsigned long))8080+8181+/* to align the pointer to a pointer address */8282+#define PTR_MASK (~(sizeof(void*)-1))8383+8484+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */8585+/* 64-bit machines, beware! SRB. 
*/8686+#define SIZEOF_PTR_LOG2 28787+8888+/* to find an entry in a page-table */8989+#define PAGE_PTR(address) \9090+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)9191+9292+/* to set the page-dir */9393+#define SET_PAGE_DIR(tsk,pgdir)9494+9595+#define pte_none(x) (!pte_val(x))9696+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)9797+#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)9898+9999+#define pmd_none(x) (!pmd_val(x))100100+/* by removing the _PAGE_KERNEL bit from the comparision, the same pmd_bad101101+ * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries.102102+ */103103+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE)104104+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)105105+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)106106+107107+#ifndef __ASSEMBLY__108108+109109+/*110110+ * The following only work if pte_present() is true.111111+ * Undefined behaviour if not..112112+ */113113+114114+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }115115+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }116116+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }117117+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }118118+static inline int pte_special(pte_t pte) { return 0; }119119+120120+static inline pte_t pte_wrprotect(pte_t pte)121121+{122122+ pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);123123+ return pte;124124+}125125+126126+static inline pte_t pte_mkclean(pte_t pte)127127+{128128+ pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); 129129+ return pte; 130130+}131131+132132+static inline pte_t pte_mkold(pte_t pte)133133+{134134+ pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);135135+ return pte;136136+}137137+138138+static inline pte_t pte_mkwrite(pte_t pte)139139+{140140+ pte_val(pte) |= _PAGE_WRITE;141141+ if (pte_val(pte) & 
_PAGE_MODIFIED)142142+ pte_val(pte) |= _PAGE_SILENT_WRITE;143143+ return pte;144144+}145145+146146+static inline pte_t pte_mkdirty(pte_t pte)147147+{148148+ pte_val(pte) |= _PAGE_MODIFIED;149149+ if (pte_val(pte) & _PAGE_WRITE)150150+ pte_val(pte) |= _PAGE_SILENT_WRITE;151151+ return pte;152152+}153153+154154+static inline pte_t pte_mkyoung(pte_t pte)155155+{156156+ pte_val(pte) |= _PAGE_ACCESSED;157157+ if (pte_val(pte) & _PAGE_READ)158158+ {159159+ pte_val(pte) |= _PAGE_SILENT_READ;160160+ if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) ==161161+ (_PAGE_WRITE | _PAGE_MODIFIED))162162+ pte_val(pte) |= _PAGE_SILENT_WRITE;163163+ }164164+ return pte;165165+}166166+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }167167+168168+/*169169+ * Conversion functions: convert a page and protection to a page entry,170170+ * and a page entry and page directory to the page they refer to.171171+ */172172+173173+/* What actually goes as arguments to the various functions is less than174174+ * obvious, but a rule of thumb is that struct page's goes as struct page *,175175+ * really physical DRAM addresses are unsigned long's, and DRAM "virtual"176176+ * addresses (the 0xc0xxxxxx's) goes as void *'s.177177+ */178178+179179+static inline pte_t __mk_pte(void * page, pgprot_t pgprot)180180+{181181+ pte_t pte;182182+ /* the PTE needs a physical address */183183+ pte_val(pte) = __pa(page) | pgprot_val(pgprot);184184+ return pte;185185+}186186+187187+#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))188188+189189+#define mk_pte_phys(physpage, pgprot) \190190+({ \191191+ pte_t __pte; \192192+ \193193+ pte_val(__pte) = (physpage) + pgprot_val(pgprot); \194194+ __pte; \195195+})196196+197197+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)198198+{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }199199+200200+201201+/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval202202+ * __pte_page(pte_val) 
refers to the "virtual" DRAM interval203203+ * pte_pagenr refers to the page-number counted starting from the virtual DRAM start204204+ */205205+206206+static inline unsigned long __pte_page(pte_t pte)207207+{208208+ /* the PTE contains a physical address */209209+ return (unsigned long)__va(pte_val(pte) & PAGE_MASK);210210+}211211+212212+#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)213213+214214+/* permanent address of a page */215215+216216+#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))217217+#define pte_page(pte) (mem_map+pte_pagenr(pte))218218+219219+/* only the pte's themselves need to point to physical DRAM (see above)220220+ * the pagetable links are purely handled within the kernel SW and thus221221+ * don't need the __pa and __va transformations.222222+ */223223+224224+static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)225225+{ pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; }226226+227227+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))228228+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))229229+230230+/* to find an entry in a page-table-directory. */231231+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))232232+233233+/* to find an entry in a page-table-directory */234234+static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)235235+{236236+ return mm->pgd + pgd_index(address);237237+}238238+239239+/* to find an entry in a kernel page-table-directory */240240+#define pgd_offset_k(address) pgd_offset(&init_mm, address)241241+242242+/* Find an entry in the third-level page table.. 
*/243243+#define __pte_offset(address) \244244+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))245245+#define pte_offset_kernel(dir, address) \246246+ ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))247247+#define pte_offset_map(dir, address) \248248+ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))249249+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)250250+251251+#define pte_unmap(pte) do { } while (0)252252+#define pte_unmap_nested(pte) do { } while (0)253253+#define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)254254+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))255255+256256+#define pte_ERROR(e) \257257+ printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))258258+#define pgd_ERROR(e) \259259+ printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))260260+261261+262262+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */263263+264264+/*265265+ * CRIS doesn't have any external MMU info: the kernel page266266+ * tables contain all the necessary information.267267+ * 268268+ * Actually I am not sure on what this could be used for.269269+ */270270+static inline void update_mmu_cache(struct vm_area_struct * vma,271271+ unsigned long address, pte_t pte)272272+{273273+}274274+275275+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */276276+/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */277277+278278+#define __swp_type(x) (((x).val >> 5) & 0x7f)279279+#define __swp_offset(x) ((x).val >> 12)280280+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 5) | ((offset) << 12) })281281+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })282282+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })283283+284284+#define kern_addr_valid(addr) (1)285285+286286+#include <asm-generic/pgtable.h>287287+288288+/*289289+ * No page table caches to initialise290290+ 
*/291291+#define pgtable_cache_init() do { } while (0)292292+293293+#define pte_to_pgoff(x) (pte_val(x) >> 6)294294+#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)295295+296296+typedef pte_t *pte_addr_t;297297+298298+#endif /* __ASSEMBLY__ */299299+#endif /* _CRIS_PGTABLE_H */
+75
arch/cris/include/asm/processor.h
···11+/*22+ * include/asm-cris/processor.h33+ *44+ * Copyright (C) 2000, 2001 Axis Communications AB55+ *66+ * Authors: Bjorn Wesen Initial version77+ *88+ */99+1010+#ifndef __ASM_CRIS_PROCESSOR_H1111+#define __ASM_CRIS_PROCESSOR_H1212+1313+#include <asm/system.h>1414+#include <asm/page.h>1515+#include <asm/ptrace.h>1616+#include <arch/processor.h>1717+1818+struct task_struct;1919+2020+#define STACK_TOP TASK_SIZE2121+#define STACK_TOP_MAX STACK_TOP2222+2323+/* This decides where the kernel will search for a free chunk of vm2424+ * space during mmap's.2525+ */2626+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))2727+2828+/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.2929+ * normally, the stack is found by doing something like p + THREAD_SIZE3030+ * in CRIS, a page is 8192 bytes, which seems like a sane size3131+ */3232+3333+#define THREAD_SIZE PAGE_SIZE3434+#define KERNEL_STACK_SIZE PAGE_SIZE3535+3636+/*3737+ * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.3838+ * This macro allows us to find those regs for a task.3939+ * Notice that subsequent pt_regs stackings, like recursive interrupts occurring while4040+ * we're in the kernel, won't affect this - only the first user->kernel transition4141+ * registers are reached by this.4242+ */4343+4444+#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1)4545+4646+/*4747+ * Dito but for the currently running task4848+ */4949+5050+#define task_pt_regs(task) user_regs(task_thread_info(task))5151+#define current_regs() task_pt_regs(current)5252+5353+static inline void prepare_to_copy(struct task_struct *tsk)5454+{5555+}5656+5757+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);5858+5959+unsigned long get_wchan(struct task_struct *p);6060+6161+#define KSTK_ESP(tsk) ((tsk) == current ? 
rdusp() : (tsk)->thread.usp)6262+6363+extern unsigned long thread_saved_pc(struct task_struct *tsk);6464+6565+/* Free all resources held by a thread. */6666+static inline void release_thread(struct task_struct *dead_task)6767+{6868+ /* Nothing needs to be done. */6969+}7070+7171+#define init_stack (init_thread_union.stack)7272+7373+#define cpu_relax() barrier()7474+7575+#endif /* __ASM_CRIS_PROCESSOR_H */
+16
arch/cris/include/asm/ptrace.h
···11+#ifndef _CRIS_PTRACE_H22+#define _CRIS_PTRACE_H33+44+#include <arch/ptrace.h>55+66+#ifdef __KERNEL__77+88+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */99+#define PTRACE_GETREGS 121010+#define PTRACE_SETREGS 131111+1212+#define profile_pc(regs) instruction_pointer(regs)1313+1414+#endif /* __KERNEL__ */1515+1616+#endif /* _CRIS_PTRACE_H */
···11+#ifndef __ASM_CRIS_SYSTEM_H22+#define __ASM_CRIS_SYSTEM_H33+44+#include <arch/system.h>55+66+/* the switch_to macro calls resume, an asm function in entry.S which does the actual77+ * task switching.88+ */99+1010+extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);1111+#define switch_to(prev,next,last) last = resume(prev,next, \1212+ (int)&((struct task_struct *)0)->thread)1313+1414+#define barrier() __asm__ __volatile__("": : :"memory")1515+#define mb() barrier()1616+#define rmb() mb()1717+#define wmb() mb()1818+#define read_barrier_depends() do { } while(0)1919+#define set_mb(var, value) do { var = value; mb(); } while (0)2020+2121+#ifdef CONFIG_SMP2222+#define smp_mb() mb()2323+#define smp_rmb() rmb()2424+#define smp_wmb() wmb()2525+#define smp_read_barrier_depends() read_barrier_depends()2626+#else2727+#define smp_mb() barrier()2828+#define smp_rmb() barrier()2929+#define smp_wmb() barrier()3030+#define smp_read_barrier_depends() do { } while(0)3131+#endif3232+3333+#define iret()3434+3535+/*3636+ * disable hlt during certain critical i/o operations3737+ */3838+#define HAVE_DISABLE_HLT3939+void disable_hlt(void);4040+void enable_hlt(void);4141+4242+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)4343+{4444+ /* since Etrax doesn't have any atomic xchg instructions, we need to disable4545+ irq's (if enabled) and do it with move.d's */4646+ unsigned long flags,temp;4747+ local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */4848+ switch (size) {4949+ case 1:5050+ *((unsigned char *)&temp) = x;5151+ x = *(unsigned char *)ptr;5252+ *(unsigned char *)ptr = *((unsigned char *)&temp);5353+ break;5454+ case 2:5555+ *((unsigned short *)&temp) = x;5656+ x = *(unsigned short *)ptr;5757+ *(unsigned short *)ptr = *((unsigned short *)&temp);5858+ break;5959+ case 4:6060+ temp = x;6161+ x = *(unsigned long *)ptr;6262+ *(unsigned long *)ptr = temp;6363+ break;6464+ }6565+ 
local_irq_restore(flags); /* restore irq enable bit */6666+ return x;6767+}6868+6969+#include <asm-generic/cmpxchg-local.h>7070+7171+/*7272+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make7373+ * them available.7474+ */7575+#define cmpxchg_local(ptr, o, n) \7676+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\7777+ (unsigned long)(n), sizeof(*(ptr))))7878+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))7979+8080+#ifndef CONFIG_SMP8181+#include <asm-generic/cmpxchg.h>8282+#endif8383+8484+#define arch_align_stack(x) (x)8585+8686+void default_idle(void);8787+8888+#endif
+106
arch/cris/include/asm/thread_info.h
···11+/* thread_info.h: CRIS low-level thread information22+ *33+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)44+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller55+ * 66+ * CRIS port by Axis Communications77+ */88+99+#ifndef _ASM_THREAD_INFO_H1010+#define _ASM_THREAD_INFO_H1111+1212+#ifdef __KERNEL__1313+1414+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR1515+1616+#ifndef __ASSEMBLY__1717+#include <asm/types.h>1818+#include <asm/processor.h>1919+#include <arch/thread_info.h>2020+#include <asm/segment.h>2121+#endif2222+2323+2424+/*2525+ * low level task data that entry.S needs immediate access to2626+ * - this struct should fit entirely inside of one cache line2727+ * - this struct shares the supervisor stack pages2828+ * - if the contents of this structure are changed, the assembly constants must also be changed2929+ */3030+#ifndef __ASSEMBLY__3131+struct thread_info {3232+ struct task_struct *task; /* main task structure */3333+ struct exec_domain *exec_domain; /* execution domain */3434+ unsigned long flags; /* low level flags */3535+ __u32 cpu; /* current CPU */3636+ int preempt_count; /* 0 => preemptable, <0 => BUG */3737+ __u32 tls; /* TLS for this thread */3838+3939+ mm_segment_t addr_limit; /* thread address space:4040+ 0-0xBFFFFFFF for user-thead4141+ 0-0xFFFFFFFF for kernel-thread4242+ */4343+ struct restart_block restart_block;4444+ __u8 supervisor_stack[0];4545+};4646+4747+#endif4848+4949+#define PREEMPT_ACTIVE 0x100000005050+5151+/*5252+ * macros/functions for gaining access to the thread information structure5353+ *5454+ * preempt_count needs to be 1 initially, until the scheduler is functional.5555+ */5656+#ifndef __ASSEMBLY__5757+#define INIT_THREAD_INFO(tsk) \5858+{ \5959+ .task = &tsk, \6060+ .exec_domain = &default_exec_domain, \6161+ .flags = 0, \6262+ .cpu = 0, \6363+ .preempt_count = 1, \6464+ .addr_limit = KERNEL_DS, \6565+ .restart_block = { \6666+ .fn = do_no_restart_syscall, \6767+ }, \6868+}6969+7070+#define 
init_thread_info (init_thread_union.thread_info)7171+7272+/* thread information allocation */7373+#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))7474+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)7575+7676+#endif /* !__ASSEMBLY__ */7777+7878+/*7979+ * thread information flags8080+ * - these are process state flags that various assembly files may need to access8181+ * - pending work-to-be-done flags are in LSW8282+ * - other flags in MSW8383+ */8484+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */8585+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */8686+#define TIF_SIGPENDING 2 /* signal pending */8787+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */8888+#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */8989+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */9090+#define TIF_MEMDIE 179191+#define TIF_FREEZE 18 /* is freezing for suspend */9292+9393+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)9494+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)9595+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)9696+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)9797+#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)9898+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)9999+#define _TIF_FREEZE (1<<TIF_FREEZE)100100+101101+#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */102102+#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */103103+104104+#endif /* __KERNEL__ */105105+106106+#endif /* _ASM_THREAD_INFO_H */
+24
arch/cris/include/asm/timex.h
···11+/*22+ * linux/include/asm-cris/timex.h33+ *44+ * CRIS architecture timex specifications55+ */66+77+#ifndef _ASM_CRIS_TIMEX_H88+#define _ASM_CRIS_TIMEX_H99+1010+#include <arch/timex.h>1111+1212+/*1313+ * We don't have a cycle-counter.. but we do not support SMP anyway where this is1414+ * used so it does not matter.1515+ */1616+1717+typedef unsigned long long cycles_t;1818+1919+static inline cycles_t get_cycles(void)2020+{2121+ return 0;2222+}2323+2424+#endif
+19
arch/cris/include/asm/tlb.h
···11+#ifndef _CRIS_TLB_H22+#define _CRIS_TLB_H33+44+#include <linux/pagemap.h>55+66+#include <arch/tlb.h>77+88+/*99+ * cris doesn't need any special per-pte or1010+ * per-vma handling..1111+ */1212+#define tlb_start_vma(tlb, vma) do { } while (0)1313+#define tlb_end_vma(tlb, vma) do { } while (0)1414+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)1515+1616+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)1717+#include <asm-generic/tlb.h>1818+1919+#endif
+404
arch/cris/include/asm/uaccess.h
···11+/* 22+ * Authors: Bjorn Wesen (bjornw@axis.com)33+ * Hans-Peter Nilsson (hp@axis.com)44+ */55+66+/* Asm:s have been tweaked (within the domain of correctness) to give77+ satisfactory results for "gcc version 2.96 20000427 (experimental)".88+99+ Check regularly...1010+1111+ Register $r9 is chosen for temporaries, being a call-clobbered register1212+ first in line to be used (notably for local blocks), not colliding with1313+ parameter registers. */1414+1515+#ifndef _CRIS_UACCESS_H1616+#define _CRIS_UACCESS_H1717+1818+#ifndef __ASSEMBLY__1919+#include <linux/sched.h>2020+#include <linux/errno.h>2121+#include <asm/processor.h>2222+#include <asm/page.h>2323+2424+#define VERIFY_READ 02525+#define VERIFY_WRITE 12626+2727+/*2828+ * The fs value determines whether argument validity checking should be2929+ * performed or not. If get_fs() == USER_DS, checking is performed, with3030+ * get_fs() == KERNEL_DS, checking is bypassed.3131+ *3232+ * For historical reasons, these macros are grossly misnamed.3333+ */3434+3535+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })3636+3737+/* addr_limit is the maximum accessible address for the task. 
we misuse3838+ * the KERNEL_DS and USER_DS values to both assign and compare the 3939+ * addr_limit values through the equally misnamed get/set_fs macros.4040+ * (see above)4141+ */4242+4343+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)4444+#define USER_DS MAKE_MM_SEG(TASK_SIZE)4545+4646+#define get_ds() (KERNEL_DS)4747+#define get_fs() (current_thread_info()->addr_limit)4848+#define set_fs(x) (current_thread_info()->addr_limit = (x))4949+5050+#define segment_eq(a,b) ((a).seg == (b).seg)5151+5252+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))5353+#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))5454+#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))5555+#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))5656+5757+#include <arch/uaccess.h>5858+5959+/*6060+ * The exception table consists of pairs of addresses: the first is the6161+ * address of an instruction that is allowed to fault, and the second is6262+ * the address at which the program should continue. No registers are6363+ * modified, so it is entirely up to the continuation code to figure out6464+ * what to do.6565+ *6666+ * All the routines below use bits of fixup code that are out of line6767+ * with the main instruction path. This means when everything is well,6868+ * we don't even have to jump over them. Further, they do not intrude6969+ * on our cache or tlb entries.7070+ */7171+7272+struct exception_table_entry7373+{7474+ unsigned long insn, fixup;7575+};7676+7777+/*7878+ * These are the main single-value transfer routines. They automatically7979+ * use the right size if we just have the right pointer type.8080+ *8181+ * This gets kind of ugly. We want to return _two_ values in "get_user()"8282+ * and yet we don't want to do any pointers, because that is too much8383+ * of a performance impact. 
Thus we have a few rather ugly macros here,8484+ * and hide all the ugliness from the user.8585+ *8686+ * The "__xxx" versions of the user access functions are versions that8787+ * do not verify the address space, that must have been done previously8888+ * with a separate "access_ok()" call (this is used when we do multiple8989+ * accesses to the same area of user memory).9090+ *9191+ * As we use the same address space for kernel and user data on9292+ * CRIS, we can just do these as direct assignments. (Of course, the9393+ * exception handling means that it's no longer "just"...)9494+ */9595+#define get_user(x,ptr) \9696+ __get_user_check((x),(ptr),sizeof(*(ptr)))9797+#define put_user(x,ptr) \9898+ __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))9999+100100+#define __get_user(x,ptr) \101101+ __get_user_nocheck((x),(ptr),sizeof(*(ptr)))102102+#define __put_user(x,ptr) \103103+ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))104104+105105+extern long __put_user_bad(void);106106+107107+#define __put_user_size(x,ptr,size,retval) \108108+do { \109109+ retval = 0; \110110+ switch (size) { \111111+ case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \112112+ case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \113113+ case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \114114+ case 8: __put_user_asm_64(x,ptr,retval); break; \115115+ default: __put_user_bad(); \116116+ } \117117+} while (0)118118+119119+#define __get_user_size(x,ptr,size,retval) \120120+do { \121121+ retval = 0; \122122+ switch (size) { \123123+ case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \124124+ case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \125125+ case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \126126+ case 8: __get_user_asm_64(x,ptr,retval); break; \127127+ default: (x) = __get_user_bad(); \128128+ } \129129+} while (0)130130+131131+#define __put_user_nocheck(x,ptr,size) \132132+({ \133133+ long __pu_err; \134134+ 
__put_user_size((x),(ptr),(size),__pu_err); \135135+ __pu_err; \136136+})137137+138138+#define __put_user_check(x,ptr,size) \139139+({ \140140+ long __pu_err = -EFAULT; \141141+ __typeof__(*(ptr)) *__pu_addr = (ptr); \142142+ if (access_ok(VERIFY_WRITE,__pu_addr,size)) \143143+ __put_user_size((x),__pu_addr,(size),__pu_err); \144144+ __pu_err; \145145+})146146+147147+struct __large_struct { unsigned long buf[100]; };148148+#define __m(x) (*(struct __large_struct *)(x))149149+150150+151151+152152+#define __get_user_nocheck(x,ptr,size) \153153+({ \154154+ long __gu_err, __gu_val; \155155+ __get_user_size(__gu_val,(ptr),(size),__gu_err); \156156+ (x) = (__typeof__(*(ptr)))__gu_val; \157157+ __gu_err; \158158+})159159+160160+#define __get_user_check(x,ptr,size) \161161+({ \162162+ long __gu_err = -EFAULT, __gu_val = 0; \163163+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \164164+ if (access_ok(VERIFY_READ,__gu_addr,size)) \165165+ __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \166166+ (x) = (__typeof__(*(ptr)))__gu_val; \167167+ __gu_err; \168168+})169169+170170+extern long __get_user_bad(void);171171+172172+/* More complex functions. 
Most are inline, but some call functions that173173+ live in lib/usercopy.c */174174+175175+extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);176176+extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);177177+extern unsigned long __do_clear_user(void __user *to, unsigned long n);178178+179179+static inline unsigned long180180+__generic_copy_to_user(void __user *to, const void *from, unsigned long n)181181+{182182+ if (access_ok(VERIFY_WRITE, to, n))183183+ return __copy_user(to,from,n);184184+ return n;185185+}186186+187187+static inline unsigned long188188+__generic_copy_from_user(void *to, const void __user *from, unsigned long n)189189+{190190+ if (access_ok(VERIFY_READ, from, n))191191+ return __copy_user_zeroing(to,from,n);192192+ return n;193193+}194194+195195+static inline unsigned long196196+__generic_clear_user(void __user *to, unsigned long n)197197+{198198+ if (access_ok(VERIFY_WRITE, to, n))199199+ return __do_clear_user(to,n);200200+ return n;201201+}202202+203203+static inline long204204+__strncpy_from_user(char *dst, const char __user *src, long count)205205+{206206+ return __do_strncpy_from_user(dst, src, count);207207+}208208+209209+static inline long210210+strncpy_from_user(char *dst, const char __user *src, long count)211211+{212212+ long res = -EFAULT;213213+ if (access_ok(VERIFY_READ, src, 1))214214+ res = __do_strncpy_from_user(dst, src, count);215215+ return res;216216+}217217+218218+219219+/* Note that these expand awfully if made into switch constructs, so220220+ don't do that. 
*/221221+222222+static inline unsigned long223223+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)224224+{225225+ unsigned long ret = 0;226226+ if (n == 0)227227+ ;228228+ else if (n == 1)229229+ __asm_copy_from_user_1(to, from, ret);230230+ else if (n == 2)231231+ __asm_copy_from_user_2(to, from, ret);232232+ else if (n == 3)233233+ __asm_copy_from_user_3(to, from, ret);234234+ else if (n == 4)235235+ __asm_copy_from_user_4(to, from, ret);236236+ else if (n == 5)237237+ __asm_copy_from_user_5(to, from, ret);238238+ else if (n == 6)239239+ __asm_copy_from_user_6(to, from, ret);240240+ else if (n == 7)241241+ __asm_copy_from_user_7(to, from, ret);242242+ else if (n == 8)243243+ __asm_copy_from_user_8(to, from, ret);244244+ else if (n == 9)245245+ __asm_copy_from_user_9(to, from, ret);246246+ else if (n == 10)247247+ __asm_copy_from_user_10(to, from, ret);248248+ else if (n == 11)249249+ __asm_copy_from_user_11(to, from, ret);250250+ else if (n == 12)251251+ __asm_copy_from_user_12(to, from, ret);252252+ else if (n == 13)253253+ __asm_copy_from_user_13(to, from, ret);254254+ else if (n == 14)255255+ __asm_copy_from_user_14(to, from, ret);256256+ else if (n == 15)257257+ __asm_copy_from_user_15(to, from, ret);258258+ else if (n == 16)259259+ __asm_copy_from_user_16(to, from, ret);260260+ else if (n == 20)261261+ __asm_copy_from_user_20(to, from, ret);262262+ else if (n == 24)263263+ __asm_copy_from_user_24(to, from, ret);264264+ else265265+ ret = __generic_copy_from_user(to, from, n);266266+267267+ return ret;268268+}269269+270270+/* Ditto, don't make a switch out of this. 
*/271271+272272+static inline unsigned long273273+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)274274+{275275+ unsigned long ret = 0;276276+ if (n == 0)277277+ ;278278+ else if (n == 1)279279+ __asm_copy_to_user_1(to, from, ret);280280+ else if (n == 2)281281+ __asm_copy_to_user_2(to, from, ret);282282+ else if (n == 3)283283+ __asm_copy_to_user_3(to, from, ret);284284+ else if (n == 4)285285+ __asm_copy_to_user_4(to, from, ret);286286+ else if (n == 5)287287+ __asm_copy_to_user_5(to, from, ret);288288+ else if (n == 6)289289+ __asm_copy_to_user_6(to, from, ret);290290+ else if (n == 7)291291+ __asm_copy_to_user_7(to, from, ret);292292+ else if (n == 8)293293+ __asm_copy_to_user_8(to, from, ret);294294+ else if (n == 9)295295+ __asm_copy_to_user_9(to, from, ret);296296+ else if (n == 10)297297+ __asm_copy_to_user_10(to, from, ret);298298+ else if (n == 11)299299+ __asm_copy_to_user_11(to, from, ret);300300+ else if (n == 12)301301+ __asm_copy_to_user_12(to, from, ret);302302+ else if (n == 13)303303+ __asm_copy_to_user_13(to, from, ret);304304+ else if (n == 14)305305+ __asm_copy_to_user_14(to, from, ret);306306+ else if (n == 15)307307+ __asm_copy_to_user_15(to, from, ret);308308+ else if (n == 16)309309+ __asm_copy_to_user_16(to, from, ret);310310+ else if (n == 20)311311+ __asm_copy_to_user_20(to, from, ret);312312+ else if (n == 24)313313+ __asm_copy_to_user_24(to, from, ret);314314+ else315315+ ret = __generic_copy_to_user(to, from, n);316316+317317+ return ret;318318+}319319+320320+/* No switch, please. 
*/321321+322322+static inline unsigned long323323+__constant_clear_user(void __user *to, unsigned long n)324324+{325325+ unsigned long ret = 0;326326+ if (n == 0)327327+ ;328328+ else if (n == 1)329329+ __asm_clear_1(to, ret);330330+ else if (n == 2)331331+ __asm_clear_2(to, ret);332332+ else if (n == 3)333333+ __asm_clear_3(to, ret);334334+ else if (n == 4)335335+ __asm_clear_4(to, ret);336336+ else if (n == 8)337337+ __asm_clear_8(to, ret);338338+ else if (n == 12)339339+ __asm_clear_12(to, ret);340340+ else if (n == 16)341341+ __asm_clear_16(to, ret);342342+ else if (n == 20)343343+ __asm_clear_20(to, ret);344344+ else if (n == 24)345345+ __asm_clear_24(to, ret);346346+ else347347+ ret = __generic_clear_user(to, n);348348+349349+ return ret;350350+}351351+352352+353353+#define clear_user(to, n) \354354+(__builtin_constant_p(n) ? \355355+ __constant_clear_user(to, n) : \356356+ __generic_clear_user(to, n))357357+358358+#define copy_from_user(to, from, n) \359359+(__builtin_constant_p(n) ? \360360+ __constant_copy_from_user(to, from, n) : \361361+ __generic_copy_from_user(to, from, n))362362+363363+#define copy_to_user(to, from, n) \364364+(__builtin_constant_p(n) ? 
\365365+ __constant_copy_to_user(to, from, n) : \366366+ __generic_copy_to_user(to, from, n))367367+368368+/* We let the __ versions of copy_from/to_user inline, because they're often369369+ * used in fast paths and have only a small space overhead.370370+ */371371+372372+static inline unsigned long373373+__generic_copy_from_user_nocheck(void *to, const void __user *from,374374+ unsigned long n)375375+{376376+ return __copy_user_zeroing(to,from,n);377377+}378378+379379+static inline unsigned long380380+__generic_copy_to_user_nocheck(void __user *to, const void *from,381381+ unsigned long n)382382+{383383+ return __copy_user(to,from,n);384384+}385385+386386+static inline unsigned long387387+__generic_clear_user_nocheck(void __user *to, unsigned long n)388388+{389389+ return __do_clear_user(to,n);390390+}391391+392392+/* without checking */393393+394394+#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))395395+#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))396396+#define __copy_to_user_inatomic __copy_to_user397397+#define __copy_from_user_inatomic __copy_from_user398398+#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))399399+400400+#define strlen_user(str) strnlen_user((str), 0x7ffffffe)401401+402402+#endif /* __ASSEMBLY__ */403403+404404+#endif /* _CRIS_UACCESS_H */
···11+#ifndef __ASM_CRIS_USER_H22+#define __ASM_CRIS_USER_H33+44+#include <linux/types.h>55+#include <asm/ptrace.h>66+#include <asm/page.h>77+#include <arch/user.h>88+99+/*1010+ * Core file format: The core file is written in such a way that gdb1111+ * can understand it and provide useful information to the user (under1212+ * linux we use the `trad-core' bfd). The file contents are as follows:1313+ *1414+ * upage: 1 page consisting of a user struct that tells gdb1515+ * what is present in the file. Directly after this is a1616+ * copy of the task_struct, which is currently not used by gdb,1717+ * but it may come in handy at some point. All of the registers1818+ * are stored as part of the upage. The upage should always be1919+ * only one page long.2020+ * data: The data segment follows next. We use current->end_text to2121+ * current->brk to pick up all of the user variables, plus any memory2222+ * that may have been sbrk'ed. No attempt is made to determine if a2323+ * page is demand-zero or if a page is totally unused, we just cover2424+ * the entire range. All of the addresses are rounded in such a way2525+ * that an integral number of pages is written.2626+ * stack: We need the stack information in order to get a meaningful2727+ * backtrace. 
We need to write the data from usp to2828+ * current->start_stack, so we round each of these in order to be able2929+ * to write an integer number of pages.3030+ */3131+3232+struct user {3333+ struct user_regs_struct regs; /* entire machine state */3434+ size_t u_tsize; /* text size (pages) */3535+ size_t u_dsize; /* data size (pages) */3636+ size_t u_ssize; /* stack size (pages) */3737+ unsigned long start_code; /* text starting address */3838+ unsigned long start_data; /* data starting address */3939+ unsigned long start_stack; /* stack starting address */4040+ long int signal; /* signal causing core dump */4141+ unsigned long u_ar0; /* help gdb find registers */4242+ unsigned long magic; /* identifies a core file */4343+ char u_comm[32]; /* user command name */4444+};4545+4646+#define NBPG PAGE_SIZE4747+#define UPAGES 14848+#define HOST_TEXT_START_ADDR (u.start_code)4949+#define HOST_DATA_START_ADDR (u.start_data)5050+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)5151+5252+#endif /* __ASM_CRIS_USER_H */
···3434#include <asm/system.h>3535#include <linux/delay.h>36363737-#include <asm/arch/svinto.h>3737+#include <arch/svinto.h>38383939/* non-arch dependent serial structures are in linux/serial.h */4040#include <linux/serial.h>4141/* while we keep our own stuff (struct e100_serial) in a local .h file */4242#include "crisv10.h"4343#include <asm/fasttimer.h>4444-#include <asm/arch/io_interface_mux.h>4444+#include <arch/io_interface_mux.h>45454646#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER4747#ifndef CONFIG_ETRAX_FAST_TIMER
+1-1
drivers/serial/crisv10.h
···1010#include <linux/circ_buf.h>1111#include <asm/termios.h>1212#include <asm/dma.h>1313-#include <asm/arch/io_interface_mux.h>1313+#include <arch/io_interface_mux.h>14141515/* Software state per channel */1616
···11-/*22- * Interrupt handling assembler and defines for Linux/CRISv1033- */44-55-#ifndef _ASM_ARCH_IRQ_H66-#define _ASM_ARCH_IRQ_H77-88-#include <asm/arch/sv_addr_ag.h>99-1010-#define NR_IRQS 321111-1212-/* The first vector number used for IRQs in v10 is really 0x20 */1313-/* but all the code and constants are offseted to make 0 the first */1414-#define FIRST_IRQ 01515-1616-#define SOME_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, some) /* 0 ? */1717-#define NMI_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, nmi) /* 1 */1818-#define TIMER0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer0) /* 2 */1919-#define TIMER1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, timer1) /* 3 */2020-/* mio, ata, par0, scsi0 on 4 */2121-/* par1, scsi1 on 5 */2222-#define NETWORK_STATUS_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, network) /* 6 */2323-2424-#define SERIAL_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, serial) /* 8 */2525-#define PA_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, pa) /* 11 */2626-/* extdma0 and extdma1 is at irq 12 and 13 and/or same as dma5 and dma6 ? */2727-#define EXTDMA0_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma0)2828-#define EXTDMA1_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, ext_dma1)2929-3030-/* dma0-9 is irq 16..25 */3131-/* 16,17: network */3232-#define DMA0_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma0)3333-#define DMA1_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma1)3434-#define NETWORK_DMA_TX_IRQ_NBR DMA0_TX_IRQ_NBR3535-#define NETWORK_DMA_RX_IRQ_NBR DMA1_RX_IRQ_NBR3636-3737-/* 18,19: dma2 and dma3 shared by par0, scsi0, ser2 and ata */3838-#define DMA2_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma2)3939-#define DMA3_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma3)4040-#define SER2_DMA_TX_IRQ_NBR DMA2_TX_IRQ_NBR4141-#define SER2_DMA_RX_IRQ_NBR DMA3_RX_IRQ_NBR4242-4343-/* 20,21: dma4 and dma5 shared by par1, scsi1, ser3 and extdma0 */4444-#define DMA4_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma4)4545-#define DMA5_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma5)4646-#define SER3_DMA_TX_IRQ_NBR DMA4_TX_IRQ_NBR4747-#define SER3_DMA_RX_IRQ_NBR DMA5_RX_IRQ_NBR4848-4949-/* 22,23: 
dma6 and dma7 shared by ser0, extdma1 and mem2mem */5050-#define DMA6_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma6)5151-#define DMA7_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma7)5252-#define SER0_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR5353-#define SER0_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR5454-#define MEM2MEM_DMA_TX_IRQ_NBR DMA6_TX_IRQ_NBR5555-#define MEM2MEM_DMA_RX_IRQ_NBR DMA7_RX_IRQ_NBR5656-5757-/* 24,25: dma8 and dma9 shared by ser1 and usb */5858-#define DMA8_TX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma8)5959-#define DMA9_RX_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, dma9)6060-#define SER1_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR6161-#define SER1_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR6262-#define USB_DMA_TX_IRQ_NBR DMA8_TX_IRQ_NBR6363-#define USB_DMA_RX_IRQ_NBR DMA9_RX_IRQ_NBR6464-6565-/* usb: controller at irq 31 + uses DMA8 and DMA9 */6666-#define USB_HC_IRQ_NBR IO_BITNR(R_VECT_MASK_RD, usb)6767-6868-/* our fine, global, etrax irq vector! the pointer lives in the head.S file. */6969-7070-typedef void (*irqvectptr)(void);7171-7272-struct etrax_interrupt_vector {7373- irqvectptr v[256];7474-};7575-7676-extern struct etrax_interrupt_vector *etrax_irv;7777-void set_int_vector(int n, irqvectptr addr);7878-void set_break_vector(int n, irqvectptr addr);7979-8080-#define __STR(x) #x8181-#define STR(x) __STR(x)8282-8383-/* SAVE_ALL saves registers so they match pt_regs */8484-8585-#define SAVE_ALL \8686- "move $irp,[$sp=$sp-16]\n\t" /* push instruction pointer and fake SBFS struct */ \8787- "push $srp\n\t" /* push subroutine return pointer */ \8888- "push $dccr\n\t" /* push condition codes */ \8989- "push $mof\n\t" /* push multiply overflow reg */ \9090- "di\n\t" /* need to disable irq's at this point */\9191- "subq 14*4,$sp\n\t" /* make room for r0-r13 */ \9292- "movem $r13,[$sp]\n\t" /* push the r0-r13 registers */ \9393- "push $r10\n\t" /* push orig_r10 */ \9494- "clear.d [$sp=$sp-4]\n\t" /* frametype - this is a normal stackframe */9595-9696- /* BLOCK_IRQ and UNBLOCK_IRQ do the same as mask_irq and unmask_irq 
*/9797-9898-#define BLOCK_IRQ(mask,nr) \9999- "move.d " #mask ",$r0\n\t" \100100- "move.d $r0,[0xb00000d8]\n\t" 101101-102102-#define UNBLOCK_IRQ(mask) \103103- "move.d " #mask ",$r0\n\t" \104104- "move.d $r0,[0xb00000dc]\n\t" 105105-106106-#define IRQ_NAME2(nr) nr##_interrupt(void)107107-#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)108108-#define sIRQ_NAME(nr) IRQ_NAME2(sIRQ##nr)109109-#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)110110-111111- /* the asm IRQ handler makes sure the causing IRQ is blocked, then it calls112112- * do_IRQ (with irq disabled still). after that it unblocks and jumps to113113- * ret_from_intr (entry.S)114114- *115115- * The reason the IRQ is blocked is to allow an sti() before the handler which116116- * will acknowledge the interrupt is run.117117- */118118-119119-#define BUILD_IRQ(nr,mask) \120120-void IRQ_NAME(nr); \121121-__asm__ ( \122122- ".text\n\t" \123123- "IRQ" #nr "_interrupt:\n\t" \124124- SAVE_ALL \125125- BLOCK_IRQ(mask,nr) /* this must be done to prevent irq loops when we ei later */ \126126- "moveq "#nr",$r10\n\t" \127127- "move.d $sp,$r11\n\t" \128128- "jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \129129- UNBLOCK_IRQ(mask) \130130- "moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \131131- "jump ret_from_intr\n\t");132132-133133-/* This is subtle. The timer interrupt is crucial and it should not be disabled for 134134- * too long. However, if it had been a normal interrupt as per BUILD_IRQ, it would135135- * have been BLOCK'ed, and then softirq's are run before we return here to UNBLOCK.136136- * If the softirq's take too much time to run, the timer irq won't run and the 137137- * watchdog will kill us.138138- *139139- * Furthermore, if a lot of other irq's occur before we return here, the multiple_irq140140- * handler is run and it prioritizes the timer interrupt. 
However if we had BLOCK'ed141141- * it here, we would not get the multiple_irq at all.142142- *143143- * The non-blocking here is based on the knowledge that the timer interrupt is 144144- * registred as a fast interrupt (IRQF_DISABLED) so that we _know_ there will not145145- * be an sti() before the timer irq handler is run to acknowledge the interrupt.146146- */147147-148148-#define BUILD_TIMER_IRQ(nr,mask) \149149-void IRQ_NAME(nr); \150150-__asm__ ( \151151- ".text\n\t" \152152- "IRQ" #nr "_interrupt:\n\t" \153153- SAVE_ALL \154154- "moveq "#nr",$r10\n\t" \155155- "move.d $sp,$r11\n\t" \156156- "jsr do_IRQ\n\t" /* irq.c, r10 and r11 are arguments */ \157157- "moveq 0,$r9\n\t" /* make ret_from_intr realise we came from an irq */ \158158- "jump ret_from_intr\n\t");159159-160160-#endif
···11-/* asm/bitops.h for Linux/CRIS22- *33- * TODO: asm versions if speed is needed44- *55- * All bit operations return 0 if the bit was cleared before the66- * operation and != 0 if it was not.77- *88- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).99- */1010-1111-#ifndef _CRIS_BITOPS_H1212-#define _CRIS_BITOPS_H1313-1414-/* Currently this is unsuitable for consumption outside the kernel. */1515-#ifdef __KERNEL__ 1616-1717-#ifndef _LINUX_BITOPS_H1818-#error only <linux/bitops.h> can be included directly1919-#endif2020-2121-#include <asm/arch/bitops.h>2222-#include <asm/system.h>2323-#include <asm/atomic.h>2424-#include <linux/compiler.h>2525-2626-/*2727- * set_bit - Atomically set a bit in memory2828- * @nr: the bit to set2929- * @addr: the address to start counting from3030- *3131- * This function is atomic and may not be reordered. See __set_bit()3232- * if you do not require the atomic guarantees.3333- * Note that @nr may be almost arbitrarily large; this function is not3434- * restricted to acting on a single-word quantity.3535- */3636-3737-#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)3838-3939-/*4040- * clear_bit - Clears a bit in memory4141- * @nr: Bit to clear4242- * @addr: Address to start counting from4343- *4444- * clear_bit() is atomic and may not be reordered. 
However, it does4545- * not contain a memory barrier, so if it is used for locking purposes,4646- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()4747- * in order to ensure changes are visible on other processors.4848- */4949-5050-#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)5151-5252-/*5353- * change_bit - Toggle a bit in memory5454- * @nr: Bit to change5555- * @addr: Address to start counting from5656- *5757- * change_bit() is atomic and may not be reordered.5858- * Note that @nr may be almost arbitrarily large; this function is not5959- * restricted to acting on a single-word quantity.6060- */6161-6262-#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)6363-6464-/**6565- * test_and_set_bit - Set a bit and return its old value6666- * @nr: Bit to set6767- * @addr: Address to count from6868- *6969- * This operation is atomic and cannot be reordered. 7070- * It also implies a memory barrier.7171- */7272-7373-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)7474-{7575- unsigned int mask, retval;7676- unsigned long flags;7777- unsigned int *adr = (unsigned int *)addr;7878-7979- adr += nr >> 5;8080- mask = 1 << (nr & 0x1f);8181- cris_atomic_save(addr, flags);8282- retval = (mask & *adr) != 0;8383- *adr |= mask;8484- cris_atomic_restore(addr, flags);8585- return retval;8686-}8787-8888-/*8989- * clear_bit() doesn't provide any barrier for the compiler.9090- */9191-#define smp_mb__before_clear_bit() barrier()9292-#define smp_mb__after_clear_bit() barrier()9393-9494-/**9595- * test_and_clear_bit - Clear a bit and return its old value9696- * @nr: Bit to clear9797- * @addr: Address to count from9898- *9999- * This operation is atomic and cannot be reordered. 
100100- * It also implies a memory barrier.101101- */102102-103103-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)104104-{105105- unsigned int mask, retval;106106- unsigned long flags;107107- unsigned int *adr = (unsigned int *)addr;108108-109109- adr += nr >> 5;110110- mask = 1 << (nr & 0x1f);111111- cris_atomic_save(addr, flags);112112- retval = (mask & *adr) != 0;113113- *adr &= ~mask;114114- cris_atomic_restore(addr, flags);115115- return retval;116116-}117117-118118-/**119119- * test_and_change_bit - Change a bit and return its old value120120- * @nr: Bit to change121121- * @addr: Address to count from122122- *123123- * This operation is atomic and cannot be reordered. 124124- * It also implies a memory barrier.125125- */126126-127127-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)128128-{129129- unsigned int mask, retval;130130- unsigned long flags;131131- unsigned int *adr = (unsigned int *)addr;132132- adr += nr >> 5;133133- mask = 1 << (nr & 0x1f);134134- cris_atomic_save(addr, flags);135135- retval = (mask & *adr) != 0;136136- *adr ^= mask;137137- cris_atomic_restore(addr, flags);138138- return retval;139139-}140140-141141-#include <asm-generic/bitops/non-atomic.h>142142-143143-/*144144- * Since we define it "external", it collides with the built-in145145- * definition, which doesn't have the same semantics. 
We don't want to146146- * use -fno-builtin, so just hide the name ffs.147147- */148148-#define ffs kernel_ffs149149-150150-#include <asm-generic/bitops/fls.h>151151-#include <asm-generic/bitops/fls64.h>152152-#include <asm-generic/bitops/hweight.h>153153-#include <asm-generic/bitops/find.h>154154-#include <asm-generic/bitops/lock.h>155155-156156-#include <asm-generic/bitops/ext2-non-atomic.h>157157-158158-#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)159159-#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)160160-161161-#include <asm-generic/bitops/minix.h>162162-#include <asm-generic/bitops/sched.h>163163-164164-#endif /* __KERNEL__ */165165-166166-#endif /* _CRIS_BITOPS_H */
···11-#ifndef _CRIS_BYTEORDER_H22-#define _CRIS_BYTEORDER_H33-44-#ifdef __GNUC__55-66-#ifdef __KERNEL__77-#include <asm/arch/byteorder.h>88-99-/* defines are necessary because the other files detect the presence1010- * of a defined __arch_swab32, not an inline1111- */1212-#define __arch__swab32(x) ___arch__swab32(x)1313-#define __arch__swab16(x) ___arch__swab16(x)1414-#endif /* __KERNEL__ */1515-1616-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)1717-# define __BYTEORDER_HAS_U64__1818-# define __SWAB_64_THRU_32__1919-#endif2020-2121-#endif /* __GNUC__ */2222-2323-#include <linux/byteorder/little_endian.h>2424-2525-#endif2626-2727-
···11-/* TODO: csum_tcpudp_magic could be speeded up, and csum_fold as well */22-33-#ifndef _CRIS_CHECKSUM_H44-#define _CRIS_CHECKSUM_H55-66-#include <asm/arch/checksum.h>77-88-/*99- * computes the checksum of a memory block at buff, length len,1010- * and adds in "sum" (32-bit)1111- *1212- * returns a 32-bit number suitable for feeding into itself1313- * or csum_tcpudp_magic1414- *1515- * this function must be called with even lengths, except1616- * for the last fragment, which may be odd1717- *1818- * it's best to have buff aligned on a 32-bit boundary1919- */2020-__wsum csum_partial(const void *buff, int len, __wsum sum);2121-2222-/*2323- * the same as csum_partial, but copies from src while it2424- * checksums2525- *2626- * here even more important to align src and dst on a 32-bit (or even2727- * better 64-bit) boundary2828- */2929-3030-__wsum csum_partial_copy_nocheck(const void *src, void *dst,3131- int len, __wsum sum);3232-3333-/*3434- * Fold a partial checksum into a word3535- */3636-3737-static inline __sum16 csum_fold(__wsum csum)3838-{3939- u32 sum = (__force u32)csum;4040- sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */4141- sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */4242- return (__force __sum16)~sum;4343-}4444-4545-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,4646- int len, __wsum sum,4747- int *errptr);4848-4949-/*5050- * This is a version of ip_compute_csum() optimized for IP headers,5151- * which always checksum on 4 octet boundaries.5252- *5353- */5454-5555-static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)5656-{5757- return csum_fold(csum_partial(iph, ihl * 4, 0));5858-}5959-6060-/*6161- * computes the checksum of the TCP/UDP pseudo-header6262- * returns a 16-bit checksum, already complemented6363- */6464-6565-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,6666- unsigned short len,6767- unsigned short proto,6868- __wsum sum)6969-{7070- 
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));7171-}7272-7373-/*7474- * this routine is used for miscellaneous IP-like checksums, mainly7575- * in icmp.c7676- */7777-7878-static inline __sum16 ip_compute_csum(const void *buff, int len)7979-{8080- return csum_fold (csum_partial(buff, len, 0));8181-}8282-8383-#endif
···11-#ifndef _CRIS_DELAY_H22-#define _CRIS_DELAY_H33-44-/*55- * Copyright (C) 1998-2002 Axis Communications AB66- *77- * Delay routines, using a pre-computed "loops_per_second" value.88- */99-1010-#include <asm/arch/delay.h>1111-1212-/* Use only for very small delays ( < 1 msec). */1313-1414-extern unsigned long loops_per_usec; /* arch/cris/mm/init.c */1515-1616-/* May be defined by arch/delay.h. */1717-#ifndef udelay1818-static inline void udelay(unsigned long usecs)1919-{2020- __delay(usecs * loops_per_usec);2121-}2222-#endif2323-2424-#endif /* defined(_CRIS_DELAY_H) */2525-2626-2727-
···11-/* $Id: dma.h,v 1.2 2001/05/09 12:17:42 johana Exp $ */22-33-#ifndef _ASM_DMA_H44-#define _ASM_DMA_H55-66-#include <asm/arch/dma.h>77-88-/* it's useless on the Etrax, but unfortunately needed by the new99- bootmem allocator (but this should do it for this) */1010-1111-#define MAX_DMA_ADDRESS PAGE_OFFSET1212-1313-/* From PCI */1414-1515-#ifdef CONFIG_PCI1616-extern int isa_dma_bridge_buggy;1717-#else1818-#define isa_dma_bridge_buggy (0)1919-#endif2020-2121-#endif /* _ASM_DMA_H */
-93
include/asm-cris/elf.h
···11-#ifndef __ASMCRIS_ELF_H22-#define __ASMCRIS_ELF_H33-44-/*55- * ELF register definitions..66- */77-88-#include <asm/user.h>99-1010-#define R_CRIS_NONE 01111-#define R_CRIS_8 11212-#define R_CRIS_16 21313-#define R_CRIS_32 31414-#define R_CRIS_8_PCREL 41515-#define R_CRIS_16_PCREL 51616-#define R_CRIS_32_PCREL 61717-#define R_CRIS_GNU_VTINHERIT 71818-#define R_CRIS_GNU_VTENTRY 81919-#define R_CRIS_COPY 92020-#define R_CRIS_GLOB_DAT 102121-#define R_CRIS_JUMP_SLOT 112222-#define R_CRIS_RELATIVE 122323-#define R_CRIS_16_GOT 132424-#define R_CRIS_32_GOT 142525-#define R_CRIS_16_GOTPLT 152626-#define R_CRIS_32_GOTPLT 162727-#define R_CRIS_32_GOTREL 172828-#define R_CRIS_32_PLT_GOTREL 182929-#define R_CRIS_32_PLT_PCREL 193030-3131-typedef unsigned long elf_greg_t;3232-3333-/* Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is3434- thus exposed to user-space. */3535-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))3636-typedef elf_greg_t elf_gregset_t[ELF_NGREG];3737-3838-/* A placeholder; CRIS does not have any fp regs. */3939-typedef unsigned long elf_fpregset_t;4040-4141-/*4242- * These are used to set parameters in the core dumps.4343- */4444-#define ELF_CLASS ELFCLASS324545-#define ELF_DATA ELFDATA2LSB4646-#define ELF_ARCH EM_CRIS4747-4848-#include <asm/arch/elf.h>4949-5050-/* The master for these definitions is {binutils}/include/elf/cris.h: */5151-/* User symbols in this file have a leading underscore. */5252-#define EF_CRIS_UNDERSCORE 0x000000015353-5454-/* This is a mask for different incompatible machine variants. */5555-#define EF_CRIS_VARIANT_MASK 0x0000000e5656-5757-/* Variant 0; may contain v0..10 object. */5858-#define EF_CRIS_VARIANT_ANY_V0_V10 0x000000005959-6060-/* Variant 1; contains v32 object. */6161-#define EF_CRIS_VARIANT_V32 0x000000026262-6363-/* Variant 2; contains object compatible with v32 and v10. 
*/6464-#define EF_CRIS_VARIANT_COMMON_V10_V32 0x000000046565-/* End of excerpt from {binutils}/include/elf/cris.h. */6666-6767-#define USE_ELF_CORE_DUMP6868-6969-#define ELF_EXEC_PAGESIZE 81927070-7171-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical7272- use of this is to invoke "./ld.so someprog" to test out a new version of7373- the loader. We need to make sure that it is out of the way of the program7474- that it will "exec", and that there is sufficient room for the brk. */7575-7676-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)7777-7878-/* This yields a mask that user programs can use to figure out what7979- instruction set this CPU supports. This could be done in user space,8080- but it's not easy, and we've already done it here. */8181-8282-#define ELF_HWCAP (0)8383-8484-/* This yields a string that ld.so will use to load implementation8585- specific libraries for optimization. This is more specific in8686- intent than poking at uname or /proc/cpuinfo.8787-*/8888-8989-#define ELF_PLATFORM (NULL)9090-9191-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)9292-9393-#endif
···11-#ifndef _CRIS_PAGE_H22-#define _CRIS_PAGE_H33-44-#include <asm/arch/page.h>55-#include <linux/const.h>66-77-/* PAGE_SHIFT determines the page size */88-#define PAGE_SHIFT 1399-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)1010-#define PAGE_MASK (~(PAGE_SIZE-1))1111-1212-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)1313-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)1414-1515-#define clear_user_page(page, vaddr, pg) clear_page(page)1616-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)1717-1818-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \1919- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)2020-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE2121-2222-/*2323- * These are used to make use of C type-checking..2424- */2525-#ifndef __ASSEMBLY__2626-typedef struct { unsigned long pte; } pte_t;2727-typedef struct { unsigned long pgd; } pgd_t;2828-typedef struct { unsigned long pgprot; } pgprot_t;2929-typedef struct page *pgtable_t;3030-#endif3131-3232-#define pte_val(x) ((x).pte)3333-#define pgd_val(x) ((x).pgd)3434-#define pgprot_val(x) ((x).pgprot)3535-3636-#define __pte(x) ((pte_t) { (x) } )3737-#define __pgd(x) ((pgd_t) { (x) } )3838-#define __pgprot(x) ((pgprot_t) { (x) } )3939-4040-/* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */4141-/* for that before indexing into the page table starting at mem_map */4242-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)4343-#define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr)4444-4545-/* to index into the page map. our pages all start at physical addr PAGE_OFFSET so4646- * we can let the map start there. notice that we subtract PAGE_OFFSET because4747- * we start our mem_map there - in other ports they map mem_map physically and4848- * use __pa instead. 
in our system both the physical and virtual address of DRAM4949- * is too high to let mem_map start at 0, so we do it this way instead (similar5050- * to arm and m68k I think)5151- */ 5252-5353-#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT))5454-#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)5555-#define virt_addr_valid(kaddr) pfn_valid((unsigned)(kaddr) >> PAGE_SHIFT)5656-5757-/* convert a page (based on mem_map and forward) to a physical address5858- * do this by figuring out the virtual address and then use __pa5959- */6060-6161-#define page_to_phys(page) __pa((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)6262-6363-#ifndef __ASSEMBLY__6464-6565-#endif /* __ASSEMBLY__ */6666-6767-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \6868- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)6969-7070-#include <asm-generic/memory_model.h>7171-#include <asm-generic/page.h>7272-7373-#endif /* _CRIS_PAGE_H */7474-
···11-/*22- * CRIS pgtable.h - macros and functions to manipulate page tables.33- */44-55-#ifndef _CRIS_PGTABLE_H66-#define _CRIS_PGTABLE_H77-88-#include <asm/page.h>99-#include <asm-generic/pgtable-nopmd.h>1010-1111-#ifndef __ASSEMBLY__1212-#include <linux/sched.h>1313-#include <asm/mmu.h>1414-#endif1515-#include <asm/arch/pgtable.h>1616-1717-/*1818- * The Linux memory management assumes a three-level page table setup. On1919- * CRIS, we use that, but "fold" the mid level into the top-level page2020- * table. Since the MMU TLB is software loaded through an interrupt, it2121- * supports any page table structure, so we could have used a three-level2222- * setup, but for the amounts of memory we normally use, a two-level is2323- * probably more efficient.2424- *2525- * This file contains the functions and defines necessary to modify and use2626- * the CRIS page table tree.2727- */2828-#ifndef __ASSEMBLY__2929-extern void paging_init(void);3030-#endif3131-3232-/* Certain architectures need to do special things when pte's3333- * within a page table are directly modified. Thus, the following3434- * hook is made available.3535- */3636-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))3737-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)3838-3939-/*4040- * (pmds are folded into pgds so this doesn't get actually called,4141- * but the define is needed for a generic inline function.)4242- */4343-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)4444-#define set_pgu(pudptr, pudval) (*(pudptr) = pudval)4545-4646-/* PGDIR_SHIFT determines the size of the area a second-level page table can4747- * map. It is equal to the page size times the number of PTE's that fit in4848- * a PMD page. A PTE is 4-bytes in CRIS. 
Hence the following number.4949- */5050-5151-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))5252-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)5353-#define PGDIR_MASK (~(PGDIR_SIZE-1))5454-5555-/*5656- * entries per page directory level: we use a two-level, so5757- * we don't really have any PMD directory physically.5858- * pointers are 4 bytes so we can use the page size and 5959- * divide it by 4 (shift by 2).6060- */6161-#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))6262-#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))6363-6464-/* calculate how many PGD entries a user-level program can use6565- * the first mappable virtual address is 06666- * (TASK_SIZE is the maximum virtual address space)6767- */6868-6969-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)7070-#define FIRST_USER_ADDRESS 07171-7272-/* zero page used for uninitialized stuff */7373-#ifndef __ASSEMBLY__7474-extern unsigned long empty_zero_page;7575-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))7676-#endif7777-7878-/* number of bits that fit into a memory pointer */7979-#define BITS_PER_PTR (8*sizeof(unsigned long))8080-8181-/* to align the pointer to a pointer address */8282-#define PTR_MASK (~(sizeof(void*)-1))8383-8484-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */8585-/* 64-bit machines, beware! SRB. 
*/8686-#define SIZEOF_PTR_LOG2 28787-8888-/* to find an entry in a page-table */8989-#define PAGE_PTR(address) \9090-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)9191-9292-/* to set the page-dir */9393-#define SET_PAGE_DIR(tsk,pgdir)9494-9595-#define pte_none(x) (!pte_val(x))9696-#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)9797-#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)9898-9999-#define pmd_none(x) (!pmd_val(x))100100-/* by removing the _PAGE_KERNEL bit from the comparision, the same pmd_bad101101- * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries.102102- */103103-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_KERNEL)) != _PAGE_TABLE)104104-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)105105-#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)106106-107107-#ifndef __ASSEMBLY__108108-109109-/*110110- * The following only work if pte_present() is true.111111- * Undefined behaviour if not..112112- */113113-114114-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }115115-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }116116-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }117117-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }118118-static inline int pte_special(pte_t pte) { return 0; }119119-120120-static inline pte_t pte_wrprotect(pte_t pte)121121-{122122- pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);123123- return pte;124124-}125125-126126-static inline pte_t pte_mkclean(pte_t pte)127127-{128128- pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE); 129129- return pte; 130130-}131131-132132-static inline pte_t pte_mkold(pte_t pte)133133-{134134- pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);135135- return pte;136136-}137137-138138-static inline pte_t pte_mkwrite(pte_t pte)139139-{140140- pte_val(pte) |= _PAGE_WRITE;141141- if (pte_val(pte) & 
_PAGE_MODIFIED)142142- pte_val(pte) |= _PAGE_SILENT_WRITE;143143- return pte;144144-}145145-146146-static inline pte_t pte_mkdirty(pte_t pte)147147-{148148- pte_val(pte) |= _PAGE_MODIFIED;149149- if (pte_val(pte) & _PAGE_WRITE)150150- pte_val(pte) |= _PAGE_SILENT_WRITE;151151- return pte;152152-}153153-154154-static inline pte_t pte_mkyoung(pte_t pte)155155-{156156- pte_val(pte) |= _PAGE_ACCESSED;157157- if (pte_val(pte) & _PAGE_READ)158158- {159159- pte_val(pte) |= _PAGE_SILENT_READ;160160- if ((pte_val(pte) & (_PAGE_WRITE | _PAGE_MODIFIED)) ==161161- (_PAGE_WRITE | _PAGE_MODIFIED))162162- pte_val(pte) |= _PAGE_SILENT_WRITE;163163- }164164- return pte;165165-}166166-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }167167-168168-/*169169- * Conversion functions: convert a page and protection to a page entry,170170- * and a page entry and page directory to the page they refer to.171171- */172172-173173-/* What actually goes as arguments to the various functions is less than174174- * obvious, but a rule of thumb is that struct page's goes as struct page *,175175- * really physical DRAM addresses are unsigned long's, and DRAM "virtual"176176- * addresses (the 0xc0xxxxxx's) goes as void *'s.177177- */178178-179179-static inline pte_t __mk_pte(void * page, pgprot_t pgprot)180180-{181181- pte_t pte;182182- /* the PTE needs a physical address */183183- pte_val(pte) = __pa(page) | pgprot_val(pgprot);184184- return pte;185185-}186186-187187-#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))188188-189189-#define mk_pte_phys(physpage, pgprot) \190190-({ \191191- pte_t __pte; \192192- \193193- pte_val(__pte) = (physpage) + pgprot_val(pgprot); \194194- __pte; \195195-})196196-197197-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)198198-{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }199199-200200-201201-/* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval202202- * __pte_page(pte_val) 
refers to the "virtual" DRAM interval203203- * pte_pagenr refers to the page-number counted starting from the virtual DRAM start204204- */205205-206206-static inline unsigned long __pte_page(pte_t pte)207207-{208208- /* the PTE contains a physical address */209209- return (unsigned long)__va(pte_val(pte) & PAGE_MASK);210210-}211211-212212-#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)213213-214214-/* permanent address of a page */215215-216216-#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))217217-#define pte_page(pte) (mem_map+pte_pagenr(pte))218218-219219-/* only the pte's themselves need to point to physical DRAM (see above)220220- * the pagetable links are purely handled within the kernel SW and thus221221- * don't need the __pa and __va transformations.222222- */223223-224224-static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)225225-{ pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; }226226-227227-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))228228-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))229229-230230-/* to find an entry in a page-table-directory. */231231-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))232232-233233-/* to find an entry in a page-table-directory */234234-static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)235235-{236236- return mm->pgd + pgd_index(address);237237-}238238-239239-/* to find an entry in a kernel page-table-directory */240240-#define pgd_offset_k(address) pgd_offset(&init_mm, address)241241-242242-/* Find an entry in the third-level page table.. 
*/243243-#define __pte_offset(address) \244244- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))245245-#define pte_offset_kernel(dir, address) \246246- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))247247-#define pte_offset_map(dir, address) \248248- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))249249-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)250250-251251-#define pte_unmap(pte) do { } while (0)252252-#define pte_unmap_nested(pte) do { } while (0)253253-#define pte_pfn(x) ((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)254254-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))255255-256256-#define pte_ERROR(e) \257257- printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))258258-#define pgd_ERROR(e) \259259- printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))260260-261261-262262-extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */263263-264264-/*265265- * CRIS doesn't have any external MMU info: the kernel page266266- * tables contain all the necessary information.267267- * 268268- * Actually I am not sure on what this could be used for.269269- */270270-static inline void update_mmu_cache(struct vm_area_struct * vma,271271- unsigned long address, pte_t pte)272272-{273273-}274274-275275-/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */276276-/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */277277-278278-#define __swp_type(x) (((x).val >> 5) & 0x7f)279279-#define __swp_offset(x) ((x).val >> 12)280280-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 5) | ((offset) << 12) })281281-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })282282-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })283283-284284-#define kern_addr_valid(addr) (1)285285-286286-#include <asm-generic/pgtable.h>287287-288288-/*289289- * No page table caches to initialise290290- 
*/291291-#define pgtable_cache_init() do { } while (0)292292-293293-#define pte_to_pgoff(x) (pte_val(x) >> 6)294294-#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)295295-296296-typedef pte_t *pte_addr_t;297297-298298-#endif /* __ASSEMBLY__ */299299-#endif /* _CRIS_PGTABLE_H */
···11-/*22- * include/asm-cris/processor.h33- *44- * Copyright (C) 2000, 2001 Axis Communications AB55- *66- * Authors: Bjorn Wesen Initial version77- *88- */99-1010-#ifndef __ASM_CRIS_PROCESSOR_H1111-#define __ASM_CRIS_PROCESSOR_H1212-1313-#include <asm/system.h>1414-#include <asm/page.h>1515-#include <asm/ptrace.h>1616-#include <asm/arch/processor.h>1717-1818-struct task_struct;1919-2020-#define STACK_TOP TASK_SIZE2121-#define STACK_TOP_MAX STACK_TOP2222-2323-/* This decides where the kernel will search for a free chunk of vm2424- * space during mmap's.2525- */2626-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))2727-2828-/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.2929- * normally, the stack is found by doing something like p + THREAD_SIZE3030- * in CRIS, a page is 8192 bytes, which seems like a sane size3131- */3232-3333-#define THREAD_SIZE PAGE_SIZE3434-#define KERNEL_STACK_SIZE PAGE_SIZE3535-3636-/*3737- * At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.3838- * This macro allows us to find those regs for a task.3939- * Notice that subsequent pt_regs stackings, like recursive interrupts occurring while4040- * we're in the kernel, won't affect this - only the first user->kernel transition4141- * registers are reached by this.4242- */4343-4444-#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE)) - 1)4545-4646-/*4747- * Dito but for the currently running task4848- */4949-5050-#define task_pt_regs(task) user_regs(task_thread_info(task))5151-#define current_regs() task_pt_regs(current)5252-5353-static inline void prepare_to_copy(struct task_struct *tsk)5454-{5555-}5656-5757-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);5858-5959-unsigned long get_wchan(struct task_struct *p);6060-6161-#define KSTK_ESP(tsk) ((tsk) == current ? 
rdusp() : (tsk)->thread.usp)6262-6363-extern unsigned long thread_saved_pc(struct task_struct *tsk);6464-6565-/* Free all resources held by a thread. */6666-static inline void release_thread(struct task_struct *dead_task)6767-{6868- /* Nothing needs to be done. */6969-}7070-7171-#define init_stack (init_thread_union.stack)7272-7373-#define cpu_relax() barrier()7474-7575-#endif /* __ASM_CRIS_PROCESSOR_H */
-16
include/asm-cris/ptrace.h
···11-#ifndef _CRIS_PTRACE_H22-#define _CRIS_PTRACE_H33-44-#include <asm/arch/ptrace.h>55-66-#ifdef __KERNEL__77-88-/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */99-#define PTRACE_GETREGS 121010-#define PTRACE_SETREGS 131111-1212-#define profile_pc(regs) instruction_pointer(regs)1313-1414-#endif /* __KERNEL__ */1515-1616-#endif /* _CRIS_PTRACE_H */
···11-#ifndef __ASM_CRIS_SYSTEM_H22-#define __ASM_CRIS_SYSTEM_H33-44-#include <asm/arch/system.h>55-66-/* the switch_to macro calls resume, an asm function in entry.S which does the actual77- * task switching.88- */99-1010-extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);1111-#define switch_to(prev,next,last) last = resume(prev,next, \1212- (int)&((struct task_struct *)0)->thread)1313-1414-#define barrier() __asm__ __volatile__("": : :"memory")1515-#define mb() barrier()1616-#define rmb() mb()1717-#define wmb() mb()1818-#define read_barrier_depends() do { } while(0)1919-#define set_mb(var, value) do { var = value; mb(); } while (0)2020-2121-#ifdef CONFIG_SMP2222-#define smp_mb() mb()2323-#define smp_rmb() rmb()2424-#define smp_wmb() wmb()2525-#define smp_read_barrier_depends() read_barrier_depends()2626-#else2727-#define smp_mb() barrier()2828-#define smp_rmb() barrier()2929-#define smp_wmb() barrier()3030-#define smp_read_barrier_depends() do { } while(0)3131-#endif3232-3333-#define iret()3434-3535-/*3636- * disable hlt during certain critical i/o operations3737- */3838-#define HAVE_DISABLE_HLT3939-void disable_hlt(void);4040-void enable_hlt(void);4141-4242-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)4343-{4444- /* since Etrax doesn't have any atomic xchg instructions, we need to disable4545- irq's (if enabled) and do it with move.d's */4646- unsigned long flags,temp;4747- local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */4848- switch (size) {4949- case 1:5050- *((unsigned char *)&temp) = x;5151- x = *(unsigned char *)ptr;5252- *(unsigned char *)ptr = *((unsigned char *)&temp);5353- break;5454- case 2:5555- *((unsigned short *)&temp) = x;5656- x = *(unsigned short *)ptr;5757- *(unsigned short *)ptr = *((unsigned short *)&temp);5858- break;5959- case 4:6060- temp = x;6161- x = *(unsigned long *)ptr;6262- *(unsigned long *)ptr = temp;6363- break;6464- 
}6565- local_irq_restore(flags); /* restore irq enable bit */6666- return x;6767-}6868-6969-#include <asm-generic/cmpxchg-local.h>7070-7171-/*7272- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make7373- * them available.7474- */7575-#define cmpxchg_local(ptr, o, n) \7676- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\7777- (unsigned long)(n), sizeof(*(ptr))))7878-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))7979-8080-#ifndef CONFIG_SMP8181-#include <asm-generic/cmpxchg.h>8282-#endif8383-8484-#define arch_align_stack(x) (x)8585-8686-void default_idle(void);8787-8888-#endif
···11-/* thread_info.h: CRIS low-level thread information22- *33- * Copyright (C) 2002 David Howells (dhowells@redhat.com)44- * - Incorporating suggestions made by Linus Torvalds and Dave Miller55- * 66- * CRIS port by Axis Communications77- */88-99-#ifndef _ASM_THREAD_INFO_H1010-#define _ASM_THREAD_INFO_H1111-1212-#ifdef __KERNEL__1313-1414-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR1515-1616-#ifndef __ASSEMBLY__1717-#include <asm/types.h>1818-#include <asm/processor.h>1919-#include <asm/arch/thread_info.h>2020-#include <asm/segment.h>2121-#endif2222-2323-2424-/*2525- * low level task data that entry.S needs immediate access to2626- * - this struct should fit entirely inside of one cache line2727- * - this struct shares the supervisor stack pages2828- * - if the contents of this structure are changed, the assembly constants must also be changed2929- */3030-#ifndef __ASSEMBLY__3131-struct thread_info {3232- struct task_struct *task; /* main task structure */3333- struct exec_domain *exec_domain; /* execution domain */3434- unsigned long flags; /* low level flags */3535- __u32 cpu; /* current CPU */3636- int preempt_count; /* 0 => preemptable, <0 => BUG */3737- __u32 tls; /* TLS for this thread */3838-3939- mm_segment_t addr_limit; /* thread address space:4040- 0-0xBFFFFFFF for user-thead4141- 0-0xFFFFFFFF for kernel-thread4242- */4343- struct restart_block restart_block;4444- __u8 supervisor_stack[0];4545-};4646-4747-#endif4848-4949-#define PREEMPT_ACTIVE 0x100000005050-5151-/*5252- * macros/functions for gaining access to the thread information structure5353- *5454- * preempt_count needs to be 1 initially, until the scheduler is functional.5555- */5656-#ifndef __ASSEMBLY__5757-#define INIT_THREAD_INFO(tsk) \5858-{ \5959- .task = &tsk, \6060- .exec_domain = &default_exec_domain, \6161- .flags = 0, \6262- .cpu = 0, \6363- .preempt_count = 1, \6464- .addr_limit = KERNEL_DS, \6565- .restart_block = { \6666- .fn = do_no_restart_syscall, \6767- }, \6868-}6969-7070-#define 
init_thread_info (init_thread_union.thread_info)7171-7272-/* thread information allocation */7373-#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))7474-#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)7575-7676-#endif /* !__ASSEMBLY__ */7777-7878-/*7979- * thread information flags8080- * - these are process state flags that various assembly files may need to access8181- * - pending work-to-be-done flags are in LSW8282- * - other flags in MSW8383- */8484-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */8585-#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */8686-#define TIF_SIGPENDING 2 /* signal pending */8787-#define TIF_NEED_RESCHED 3 /* rescheduling necessary */8888-#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */8989-#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */9090-#define TIF_MEMDIE 179191-#define TIF_FREEZE 18 /* is freezing for suspend */9292-9393-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)9494-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)9595-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)9696-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)9797-#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)9898-#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)9999-#define _TIF_FREEZE (1<<TIF_FREEZE)100100-101101-#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */102102-#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */103103-104104-#endif /* __KERNEL__ */105105-106106-#endif /* _ASM_THREAD_INFO_H */
-24
include/asm-cris/timex.h
···11-/*22- * linux/include/asm-cris/timex.h33- *44- * CRIS architecture timex specifications55- */66-77-#ifndef _ASM_CRIS_TIMEX_H88-#define _ASM_CRIS_TIMEX_H99-1010-#include <asm/arch/timex.h>1111-1212-/*1313- * We don't have a cycle-counter.. but we do not support SMP anyway where this is1414- * used so it does not matter.1515- */1616-1717-typedef unsigned long long cycles_t;1818-1919-static inline cycles_t get_cycles(void)2020-{2121- return 0;2222-}2323-2424-#endif
-19
include/asm-cris/tlb.h
···11-#ifndef _CRIS_TLB_H22-#define _CRIS_TLB_H33-44-#include <linux/pagemap.h>55-66-#include <asm/arch/tlb.h>77-88-/*99- * cris doesn't need any special per-pte or1010- * per-vma handling..1111- */1212-#define tlb_start_vma(tlb, vma) do { } while (0)1313-#define tlb_end_vma(tlb, vma) do { } while (0)1414-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)1515-1616-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)1717-#include <asm-generic/tlb.h>1818-1919-#endif
···11-/* 22- * Authors: Bjorn Wesen (bjornw@axis.com)33- * Hans-Peter Nilsson (hp@axis.com)44- */55-66-/* Asm:s have been tweaked (within the domain of correctness) to give77- satisfactory results for "gcc version 2.96 20000427 (experimental)".88-99- Check regularly...1010-1111- Register $r9 is chosen for temporaries, being a call-clobbered register1212- first in line to be used (notably for local blocks), not colliding with1313- parameter registers. */1414-1515-#ifndef _CRIS_UACCESS_H1616-#define _CRIS_UACCESS_H1717-1818-#ifndef __ASSEMBLY__1919-#include <linux/sched.h>2020-#include <linux/errno.h>2121-#include <asm/processor.h>2222-#include <asm/page.h>2323-2424-#define VERIFY_READ 02525-#define VERIFY_WRITE 12626-2727-/*2828- * The fs value determines whether argument validity checking should be2929- * performed or not. If get_fs() == USER_DS, checking is performed, with3030- * get_fs() == KERNEL_DS, checking is bypassed.3131- *3232- * For historical reasons, these macros are grossly misnamed.3333- */3434-3535-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })3636-3737-/* addr_limit is the maximum accessible address for the task. 
we misuse3838- * the KERNEL_DS and USER_DS values to both assign and compare the 3939- * addr_limit values through the equally misnamed get/set_fs macros.4040- * (see above)4141- */4242-4343-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)4444-#define USER_DS MAKE_MM_SEG(TASK_SIZE)4545-4646-#define get_ds() (KERNEL_DS)4747-#define get_fs() (current_thread_info()->addr_limit)4848-#define set_fs(x) (current_thread_info()->addr_limit = (x))4949-5050-#define segment_eq(a,b) ((a).seg == (b).seg)5151-5252-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))5353-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))5454-#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))5555-#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))5656-5757-#include <asm/arch/uaccess.h>5858-5959-/*6060- * The exception table consists of pairs of addresses: the first is the6161- * address of an instruction that is allowed to fault, and the second is6262- * the address at which the program should continue. No registers are6363- * modified, so it is entirely up to the continuation code to figure out6464- * what to do.6565- *6666- * All the routines below use bits of fixup code that are out of line6767- * with the main instruction path. This means when everything is well,6868- * we don't even have to jump over them. Further, they do not intrude6969- * on our cache or tlb entries.7070- */7171-7272-struct exception_table_entry7373-{7474- unsigned long insn, fixup;7575-};7676-7777-/*7878- * These are the main single-value transfer routines. They automatically7979- * use the right size if we just have the right pointer type.8080- *8181- * This gets kind of ugly. We want to return _two_ values in "get_user()"8282- * and yet we don't want to do any pointers, because that is too much8383- * of a performance impact. 
Thus we have a few rather ugly macros here,8484- * and hide all the ugliness from the user.8585- *8686- * The "__xxx" versions of the user access functions are versions that8787- * do not verify the address space, that must have been done previously8888- * with a separate "access_ok()" call (this is used when we do multiple8989- * accesses to the same area of user memory).9090- *9191- * As we use the same address space for kernel and user data on9292- * CRIS, we can just do these as direct assignments. (Of course, the9393- * exception handling means that it's no longer "just"...)9494- */9595-#define get_user(x,ptr) \9696- __get_user_check((x),(ptr),sizeof(*(ptr)))9797-#define put_user(x,ptr) \9898- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))9999-100100-#define __get_user(x,ptr) \101101- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))102102-#define __put_user(x,ptr) \103103- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))104104-105105-extern long __put_user_bad(void);106106-107107-#define __put_user_size(x,ptr,size,retval) \108108-do { \109109- retval = 0; \110110- switch (size) { \111111- case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \112112- case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \113113- case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \114114- case 8: __put_user_asm_64(x,ptr,retval); break; \115115- default: __put_user_bad(); \116116- } \117117-} while (0)118118-119119-#define __get_user_size(x,ptr,size,retval) \120120-do { \121121- retval = 0; \122122- switch (size) { \123123- case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \124124- case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \125125- case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \126126- case 8: __get_user_asm_64(x,ptr,retval); break; \127127- default: (x) = __get_user_bad(); \128128- } \129129-} while (0)130130-131131-#define __put_user_nocheck(x,ptr,size) \132132-({ \133133- long __pu_err; \134134- 
__put_user_size((x),(ptr),(size),__pu_err); \135135- __pu_err; \136136-})137137-138138-#define __put_user_check(x,ptr,size) \139139-({ \140140- long __pu_err = -EFAULT; \141141- __typeof__(*(ptr)) *__pu_addr = (ptr); \142142- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \143143- __put_user_size((x),__pu_addr,(size),__pu_err); \144144- __pu_err; \145145-})146146-147147-struct __large_struct { unsigned long buf[100]; };148148-#define __m(x) (*(struct __large_struct *)(x))149149-150150-151151-152152-#define __get_user_nocheck(x,ptr,size) \153153-({ \154154- long __gu_err, __gu_val; \155155- __get_user_size(__gu_val,(ptr),(size),__gu_err); \156156- (x) = (__typeof__(*(ptr)))__gu_val; \157157- __gu_err; \158158-})159159-160160-#define __get_user_check(x,ptr,size) \161161-({ \162162- long __gu_err = -EFAULT, __gu_val = 0; \163163- const __typeof__(*(ptr)) *__gu_addr = (ptr); \164164- if (access_ok(VERIFY_READ,__gu_addr,size)) \165165- __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \166166- (x) = (__typeof__(*(ptr)))__gu_val; \167167- __gu_err; \168168-})169169-170170-extern long __get_user_bad(void);171171-172172-/* More complex functions. 
Most are inline, but some call functions that173173- live in lib/usercopy.c */174174-175175-extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);176176-extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);177177-extern unsigned long __do_clear_user(void __user *to, unsigned long n);178178-179179-static inline unsigned long180180-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)181181-{182182- if (access_ok(VERIFY_WRITE, to, n))183183- return __copy_user(to,from,n);184184- return n;185185-}186186-187187-static inline unsigned long188188-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)189189-{190190- if (access_ok(VERIFY_READ, from, n))191191- return __copy_user_zeroing(to,from,n);192192- return n;193193-}194194-195195-static inline unsigned long196196-__generic_clear_user(void __user *to, unsigned long n)197197-{198198- if (access_ok(VERIFY_WRITE, to, n))199199- return __do_clear_user(to,n);200200- return n;201201-}202202-203203-static inline long204204-__strncpy_from_user(char *dst, const char __user *src, long count)205205-{206206- return __do_strncpy_from_user(dst, src, count);207207-}208208-209209-static inline long210210-strncpy_from_user(char *dst, const char __user *src, long count)211211-{212212- long res = -EFAULT;213213- if (access_ok(VERIFY_READ, src, 1))214214- res = __do_strncpy_from_user(dst, src, count);215215- return res;216216-}217217-218218-219219-/* Note that these expand awfully if made into switch constructs, so220220- don't do that. 
*/221221-222222-static inline unsigned long223223-__constant_copy_from_user(void *to, const void __user *from, unsigned long n)224224-{225225- unsigned long ret = 0;226226- if (n == 0)227227- ;228228- else if (n == 1)229229- __asm_copy_from_user_1(to, from, ret);230230- else if (n == 2)231231- __asm_copy_from_user_2(to, from, ret);232232- else if (n == 3)233233- __asm_copy_from_user_3(to, from, ret);234234- else if (n == 4)235235- __asm_copy_from_user_4(to, from, ret);236236- else if (n == 5)237237- __asm_copy_from_user_5(to, from, ret);238238- else if (n == 6)239239- __asm_copy_from_user_6(to, from, ret);240240- else if (n == 7)241241- __asm_copy_from_user_7(to, from, ret);242242- else if (n == 8)243243- __asm_copy_from_user_8(to, from, ret);244244- else if (n == 9)245245- __asm_copy_from_user_9(to, from, ret);246246- else if (n == 10)247247- __asm_copy_from_user_10(to, from, ret);248248- else if (n == 11)249249- __asm_copy_from_user_11(to, from, ret);250250- else if (n == 12)251251- __asm_copy_from_user_12(to, from, ret);252252- else if (n == 13)253253- __asm_copy_from_user_13(to, from, ret);254254- else if (n == 14)255255- __asm_copy_from_user_14(to, from, ret);256256- else if (n == 15)257257- __asm_copy_from_user_15(to, from, ret);258258- else if (n == 16)259259- __asm_copy_from_user_16(to, from, ret);260260- else if (n == 20)261261- __asm_copy_from_user_20(to, from, ret);262262- else if (n == 24)263263- __asm_copy_from_user_24(to, from, ret);264264- else265265- ret = __generic_copy_from_user(to, from, n);266266-267267- return ret;268268-}269269-270270-/* Ditto, don't make a switch out of this. 
*/271271-272272-static inline unsigned long273273-__constant_copy_to_user(void __user *to, const void *from, unsigned long n)274274-{275275- unsigned long ret = 0;276276- if (n == 0)277277- ;278278- else if (n == 1)279279- __asm_copy_to_user_1(to, from, ret);280280- else if (n == 2)281281- __asm_copy_to_user_2(to, from, ret);282282- else if (n == 3)283283- __asm_copy_to_user_3(to, from, ret);284284- else if (n == 4)285285- __asm_copy_to_user_4(to, from, ret);286286- else if (n == 5)287287- __asm_copy_to_user_5(to, from, ret);288288- else if (n == 6)289289- __asm_copy_to_user_6(to, from, ret);290290- else if (n == 7)291291- __asm_copy_to_user_7(to, from, ret);292292- else if (n == 8)293293- __asm_copy_to_user_8(to, from, ret);294294- else if (n == 9)295295- __asm_copy_to_user_9(to, from, ret);296296- else if (n == 10)297297- __asm_copy_to_user_10(to, from, ret);298298- else if (n == 11)299299- __asm_copy_to_user_11(to, from, ret);300300- else if (n == 12)301301- __asm_copy_to_user_12(to, from, ret);302302- else if (n == 13)303303- __asm_copy_to_user_13(to, from, ret);304304- else if (n == 14)305305- __asm_copy_to_user_14(to, from, ret);306306- else if (n == 15)307307- __asm_copy_to_user_15(to, from, ret);308308- else if (n == 16)309309- __asm_copy_to_user_16(to, from, ret);310310- else if (n == 20)311311- __asm_copy_to_user_20(to, from, ret);312312- else if (n == 24)313313- __asm_copy_to_user_24(to, from, ret);314314- else315315- ret = __generic_copy_to_user(to, from, n);316316-317317- return ret;318318-}319319-320320-/* No switch, please. 
*/321321-322322-static inline unsigned long323323-__constant_clear_user(void __user *to, unsigned long n)324324-{325325- unsigned long ret = 0;326326- if (n == 0)327327- ;328328- else if (n == 1)329329- __asm_clear_1(to, ret);330330- else if (n == 2)331331- __asm_clear_2(to, ret);332332- else if (n == 3)333333- __asm_clear_3(to, ret);334334- else if (n == 4)335335- __asm_clear_4(to, ret);336336- else if (n == 8)337337- __asm_clear_8(to, ret);338338- else if (n == 12)339339- __asm_clear_12(to, ret);340340- else if (n == 16)341341- __asm_clear_16(to, ret);342342- else if (n == 20)343343- __asm_clear_20(to, ret);344344- else if (n == 24)345345- __asm_clear_24(to, ret);346346- else347347- ret = __generic_clear_user(to, n);348348-349349- return ret;350350-}351351-352352-353353-#define clear_user(to, n) \354354-(__builtin_constant_p(n) ? \355355- __constant_clear_user(to, n) : \356356- __generic_clear_user(to, n))357357-358358-#define copy_from_user(to, from, n) \359359-(__builtin_constant_p(n) ? \360360- __constant_copy_from_user(to, from, n) : \361361- __generic_copy_from_user(to, from, n))362362-363363-#define copy_to_user(to, from, n) \364364-(__builtin_constant_p(n) ? 
\365365- __constant_copy_to_user(to, from, n) : \366366- __generic_copy_to_user(to, from, n))367367-368368-/* We let the __ versions of copy_from/to_user inline, because they're often369369- * used in fast paths and have only a small space overhead.370370- */371371-372372-static inline unsigned long373373-__generic_copy_from_user_nocheck(void *to, const void __user *from,374374- unsigned long n)375375-{376376- return __copy_user_zeroing(to,from,n);377377-}378378-379379-static inline unsigned long380380-__generic_copy_to_user_nocheck(void __user *to, const void *from,381381- unsigned long n)382382-{383383- return __copy_user(to,from,n);384384-}385385-386386-static inline unsigned long387387-__generic_clear_user_nocheck(void __user *to, unsigned long n)388388-{389389- return __do_clear_user(to,n);390390-}391391-392392-/* without checking */393393-394394-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))395395-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))396396-#define __copy_to_user_inatomic __copy_to_user397397-#define __copy_from_user_inatomic __copy_from_user398398-#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))399399-400400-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)401401-402402-#endif /* __ASSEMBLY__ */403403-404404-#endif /* _CRIS_UACCESS_H */
···11-#ifndef __ASM_CRIS_USER_H22-#define __ASM_CRIS_USER_H33-44-#include <linux/types.h>55-#include <asm/ptrace.h>66-#include <asm/page.h>77-#include <asm/arch/user.h>88-99-/*1010- * Core file format: The core file is written in such a way that gdb1111- * can understand it and provide useful information to the user (under1212- * linux we use the `trad-core' bfd). The file contents are as follows:1313- *1414- * upage: 1 page consisting of a user struct that tells gdb1515- * what is present in the file. Directly after this is a1616- * copy of the task_struct, which is currently not used by gdb,1717- * but it may come in handy at some point. All of the registers1818- * are stored as part of the upage. The upage should always be1919- * only one page long.2020- * data: The data segment follows next. We use current->end_text to2121- * current->brk to pick up all of the user variables, plus any memory2222- * that may have been sbrk'ed. No attempt is made to determine if a2323- * page is demand-zero or if a page is totally unused, we just cover2424- * the entire range. All of the addresses are rounded in such a way2525- * that an integral number of pages is written.2626- * stack: We need the stack information in order to get a meaningful2727- * backtrace. 
We need to write the data from usp to2828- * current->start_stack, so we round each of these in order to be able2929- * to write an integer number of pages.3030- */3131-3232-struct user {3333- struct user_regs_struct regs; /* entire machine state */3434- size_t u_tsize; /* text size (pages) */3535- size_t u_dsize; /* data size (pages) */3636- size_t u_ssize; /* stack size (pages) */3737- unsigned long start_code; /* text starting address */3838- unsigned long start_data; /* data starting address */3939- unsigned long start_stack; /* stack starting address */4040- long int signal; /* signal causing core dump */4141- unsigned long u_ar0; /* help gdb find registers */4242- unsigned long magic; /* identifies a core file */4343- char u_comm[32]; /* user command name */4444-};4545-4646-#define NBPG PAGE_SIZE4747-#define UPAGES 14848-#define HOST_TEXT_START_ADDR (u.start_code)4949-#define HOST_DATA_START_ADDR (u.start_data)5050-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)5151-5252-#endif /* __ASM_CRIS_USER_H */