···66#define _ASM_PARISC_ATOMIC_H_7788#include <linux/types.h>99-#include <asm/system.h>1091110/*1211 * Atomic operations that C can't guarantee us. Useful for
+35 lines (new file)
arch/parisc/include/asm/barrier.h
#ifndef __PARISC_BARRIER_H
#define __PARISC_BARRIER_H

/* Memory-barrier primitives for PA-RISC.  All of them compile down to a
 * pure compiler barrier (no machine instruction) — see the rationale below. */

/*
** This is simply the barrier() macro from linux/kernel.h but when serial.c
** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
** hasn't yet been included yet so it fails, thus repeating the macro here.
**
** PA-RISC architecture allows for weakly ordered memory accesses although
** none of the processors use it. There is a strong ordered bit that is
** set in the O-bit of the page directory entry. Operating systems that
** can not tolerate out of order accesses should set this bit when mapping
** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
** of the processor implemented the PSW O-bit). The PCX-W ERS states that
** the TLB O-bit is not implemented so the page directory does not need to
** have the O-bit set when mapping pages (section 3.1). This section also
** states that the PSW Y, Z, G, and O bits are not implemented.
** So it looks like nothing needs to be done for parisc-linux (yet).
** (thanks to chada for the above comment -ggg)
**
** The __asm__ op below simple prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
#define mb()		__asm__ __volatile__("":::"memory")	/* barrier() */
#define rmb()		mb()
#define wmb()		mb()
#define smp_mb()	mb()
#define smp_rmb()	mb()
#define smp_wmb()	mb()
/* No ordering needed between dependent loads on this architecture. */
#define smp_read_barrier_depends()	do { } while(0)
#define read_barrier_depends()		do { } while(0)

/* Store "value" to "var", then issue a (compiler) barrier. */
#define set_mb(var, value)		do { var = value; mb(); } while (0)

#endif /* __PARISC_BARRIER_H */
+1 -1
arch/parisc/include/asm/delay.h

 #ifndef _PARISC_DELAY_H
 #define _PARISC_DELAY_H

-#include <asm/system.h>    /* for mfctl() */
+#include <asm/special_insns.h>    /* for mfctl() */
 #include <asm/processor.h> /* for boot_cpu_data */

#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H

#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */

#define __PA_LDCW_ALIGNMENT	16
/* Round the address of a's lock-word array up to the next 16-byte
 * boundary and return it as the usable lock word. */
#define __ldcw_align(a) ({ \
	unsigned long __ret = (unsigned long) &(a)->lock[0];		\
	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)			\
		& ~(__PA_LDCW_ALIGNMENT - 1);				\
	(volatile unsigned int *) __ret;				\
})
#define __LDCW	"ldcw"

#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
   I've attached a summary of the change, but basically, for PA 2.0, as
   long as the ",CO" (coherent operation) completer is specified, then the
   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
   they only require "natural" alignment (4-byte for ldcw, 8-byte for
   ldcd). */

#define __PA_LDCW_ALIGNMENT	4
#define __ldcw_align(a) (&(a)->slock)
#define __LDCW	"ldcw,co"

#endif /*!CONFIG_PA20*/

/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
 * Atomically loads *(a) into __ret and clears the word to zero. */
#define __ldcw(a) ({						\
	unsigned __ret;						\
	__asm__ __volatile__(__LDCW " 0(%2),%0"			\
		: "=r" (__ret), "+m" (*(a)) : "r" (a));		\
	__ret;							\
})

#ifdef CONFIG_SMP
/* Place lock data in its own section so it can be cacheline-aligned
 * by the linker script. */
# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
#endif

#endif /* __PARISC_LDCW_H */
+1 -1
arch/parisc/include/asm/processor.h

 #include <asm/pdc.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
-#include <asm/system.h>
 #include <asm/percpu.h>

 #endif /* __ASSEMBLY__ */
...
  * Return saved PC of a blocked thread. This is used by ps mostly.
  */

+struct task_struct;
 unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);
+41 lines
arch/parisc/include/asm/psw.h
#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
#define USER_PSW      (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)

#ifndef __ASSEMBLY__

/* The program status word as bitfields.
 * NOTE(review): field order assumes the compiler allocates bitfields
 * MSB-first to match the hardware PSW layout on this (big-endian)
 * architecture — bitfield allocation order is implementation-defined. */
struct pa_psw {
	unsigned int y:1;
	unsigned int z:1;
	unsigned int rv:2;
	unsigned int w:1;
	unsigned int e:1;
	unsigned int s:1;
	unsigned int t:1;

	unsigned int h:1;
	unsigned int l:1;
	unsigned int n:1;
	unsigned int x:1;
	unsigned int b:1;
	unsigned int c:1;
	unsigned int v:1;
	unsigned int m:1;

	unsigned int cb:8;

	unsigned int o:1;
	unsigned int g:1;
	unsigned int f:1;
	unsigned int r:1;
	unsigned int q:1;
	unsigned int p:1;
	unsigned int d:1;
	unsigned int i:1;
};

/* View the saved PSW in a task's pt_regs as a struct pa_psw.
 * On 64-bit the PSW word is 8 bytes wide; the +4 skips to the low
 * half where these 32 bits live. */
#ifdef CONFIG_64BIT
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
#else
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
#endif

#endif /* !__ASSEMBLY__ */

#endif
+40 lines (new file)
arch/parisc/include/asm/special_insns.h
#ifndef __PARISC_SPECIAL_INSNS_H
#define __PARISC_SPECIAL_INSNS_H

/* Read control register "reg" (reg is pasted into the mnemonic, so it
 * must be a literal register name/number). */
#define mfctl(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfctl " #reg ",%0" :	\
		 "=r" (cr)		\
	);				\
	cr;				\
})

/* Write gr into control register cr (cr must be a constant). */
#define mtctl(gr, cr) \
	__asm__ __volatile__("mtctl %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")

/* these are here to de-mystify the calling code, and to provide hooks */
/* which I needed for debugging EIEM problems -PB */
#define get_eiem() mfctl(15)
static inline void set_eiem(unsigned long val)
{
	mtctl(val, 15);
}

/* Read space register "reg" (literal, pasted into the mnemonic). */
#define mfsp(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfsp " #reg ",%0" :	\
		 "=r" (cr)		\
	);				\
	cr;				\
})

/* Write gr into space register cr (cr must be a constant). */
#define mtsp(gr, cr) \
	__asm__ __volatile__("mtsp %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")

#endif /* __PARISC_SPECIAL_INSNS_H */
/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
/* Transitional compatibility stub: the former contents of this header
 * (pa_psw, mfctl/mtctl/mfsp/mtsp, the barrier macros, the LDCW helpers,
 * and switch_to) have been split out into the headers included below.
 * Existing includers of <asm/system.h> keep working until they are
 * converted to include the specific headers directly. */
#include <asm/barrier.h>
#include <asm/exec.h>
#include <asm/ldcw.h>
#include <asm/special_insns.h>
#include <asm/switch_to.h>