Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] CRIS update: updates for 2.6.12

Patches to make CRIS work with 2.6.12.

Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Mikael Starvik and committed by Linus Torvalds.
5d01e6ce dcf1310b

+88 -119
+7 -2
arch/cris/arch-v10/kernel/ptrace.c
··· 11 11 #include <linux/ptrace.h> 12 12 #include <linux/user.h> 13 13 #include <linux/signal.h> 14 + #include <linux/security.h> 14 15 15 16 #include <asm/uaccess.h> 16 17 #include <asm/page.h> ··· 87 86 ret = -EPERM; 88 87 89 88 if (request == PTRACE_TRACEME) { 89 + /* are we already being traced? */ 90 90 if (current->ptrace & PT_PTRACED) 91 91 goto out; 92 - 92 + ret = security_ptrace(current->parent, current); 93 + if (ret) 94 + goto out; 95 + /* set the ptrace bit in the process flags. */ 93 96 current->ptrace |= PT_PTRACED; 94 97 ret = 0; 95 98 goto out; ··· 212 207 case PTRACE_KILL: 213 208 ret = 0; 214 209 215 - if (child->state == TASK_ZOMBIE) 210 + if (child->exit_state == EXIT_ZOMBIE) 216 211 break; 217 212 218 213 child->exit_code = SIGKILL;
+16 -23
arch/cris/kernel/module.c
··· 32 32 { 33 33 if (size == 0) 34 34 return NULL; 35 - return vmalloc(size); 35 + return vmalloc_exec(size); 36 36 } 37 37 38 38 ··· 59 59 unsigned int relsec, 60 60 struct module *me) 61 61 { 62 - unsigned int i; 63 - Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; 64 - Elf32_Sym *sym; 65 - uint32_t *location; 66 - 67 - DEBUGP("Applying relocate section %u to %u\n", relsec, 68 - sechdrs[relsec].sh_info); 69 - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 70 - /* This is where to make the change */ 71 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_offset 72 - + rel[i].r_offset; 73 - /* This is the symbol it is referring to. Note that all 74 - undefined symbols have been resolved. */ 75 - sym = (Elf32_Sym *)sechdrs[symindex].sh_addr 76 - + ELF32_R_SYM(rel[i].r_info); 77 - 78 - /* We add the value into the location given */ 79 - *location += sym->st_value; 80 - } 81 - return 0; 62 + printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name); 63 + return -ENOEXEC; 82 64 } 83 65 84 66 int apply_relocate_add(Elf32_Shdr *sechdrs, ··· 72 90 unsigned int i; 73 91 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; 74 92 75 - DEBUGP ("Applying relocate section %u to %u\n", relsec, 93 + DEBUGP ("Applying add relocate section %u to %u\n", relsec, 76 94 sechdrs[relsec].sh_info); 77 95 78 96 for (i = 0; i < sechdrs[relsec].sh_size / sizeof (*rela); i++) { ··· 85 103 Elf32_Sym *sym 86 104 = ((Elf32_Sym *)sechdrs[symindex].sh_addr 87 105 + ELF32_R_SYM (rela[i].r_info)); 88 - *loc = sym->st_value + rela[i].r_addend; 106 + switch (ELF32_R_TYPE(rela[i].r_info)) { 107 + case R_CRIS_32: 108 + *loc = sym->st_value + rela[i].r_addend; 109 + break; 110 + case R_CRIS_32_PCREL: 111 + *loc = sym->st_value - (unsigned)loc + rela[i].r_addend - 4; 112 + break; 113 + default: 114 + printk(KERN_ERR "module %s: Unknown relocation: %u\n", 115 + me->name, ELF32_R_TYPE(rela[i].r_info)); 116 + return -ENOEXEC; 117 + } 89 118 } 90 119 91 120 return 0;
+23 -8
arch/cris/kernel/process.c
··· 1 - /* $Id: process.c,v 1.17 2004/04/05 13:53:48 starvik Exp $ 1 + /* $Id: process.c,v 1.21 2005/03/04 08:16:17 starvik Exp $ 2 2 * 3 3 * linux/arch/cris/kernel/process.c 4 4 * ··· 8 8 * Authors: Bjorn Wesen (bjornw@axis.com) 9 9 * 10 10 * $Log: process.c,v $ 11 + * Revision 1.21 2005/03/04 08:16:17 starvik 12 + * Merge of Linux 2.6.11. 13 + * 14 + * Revision 1.20 2005/01/18 05:57:22 starvik 15 + * Renamed hlt_counter to cris_hlt_counter and made it global. 16 + * 17 + * Revision 1.19 2004/10/19 13:07:43 starvik 18 + * Merge of Linux 2.6.9 19 + * 20 + * Revision 1.18 2004/08/16 12:37:23 starvik 21 + * Merge of Linux 2.6.8 22 + * 11 23 * Revision 1.17 2004/04/05 13:53:48 starvik 12 24 * Merge of Linux 2.6.5 13 25 * ··· 173 161 * region by enable_hlt/disable_hlt. 174 162 */ 175 163 176 - static int hlt_counter=0; 164 + int cris_hlt_counter=0; 177 165 178 166 void disable_hlt(void) 179 167 { 180 - hlt_counter++; 168 + cris_hlt_counter++; 181 169 } 182 170 183 171 EXPORT_SYMBOL(disable_hlt); 184 172 185 173 void enable_hlt(void) 186 174 { 187 - hlt_counter--; 175 + cris_hlt_counter--; 188 176 } 189 177 190 178 EXPORT_SYMBOL(enable_hlt); ··· 207 195 /* endless idle loop with no priority at all */ 208 196 while (1) { 209 197 while (!need_resched()) { 210 - void (*idle)(void) = pm_idle; 211 - 198 + void (*idle)(void); 199 + /* 200 + * Mark this as an RCU critical section so that 201 + * synchronize_kernel() in the unload path waits 202 + * for our completion. 203 + */ 204 + idle = pm_idle; 212 205 if (!idle) 213 206 idle = default_idle; 214 - 215 207 idle(); 216 208 } 217 209 schedule(); 218 210 } 219 - 220 211 } 221 212 222 213 void hard_reset_now (void);
+1 -1
include/asm-cris/arch-v10/bitops.h
··· 51 51 * 52 52 * Undefined if no bit exists, so code should check against 0 first. 53 53 */ 54 - extern __inline__ unsigned long __ffs(unsigned long word) 54 + extern inline unsigned long __ffs(unsigned long word) 55 55 { 56 56 return cris_swapnwbrlz(~word); 57 57 }
+1 -1
include/asm-cris/arch-v10/offset.h
··· 25 25 #define THREAD_usp 4 /* offsetof(struct thread_struct, usp) */ 26 26 #define THREAD_dccr 8 /* offsetof(struct thread_struct, dccr) */ 27 27 28 - #define TASK_pid 133 /* offsetof(struct task_struct, pid) */ 28 + #define TASK_pid 141 /* offsetof(struct task_struct, pid) */ 29 29 30 30 #define LCLONE_VM 256 /* CLONE_VM */ 31 31 #define LCLONE_UNTRACED 8388608 /* CLONE_UNTRACED */
+17 -18
include/asm-cris/bitops.h
··· 16 16 17 17 #include <asm/arch/bitops.h> 18 18 #include <asm/system.h> 19 + #include <asm/atomic.h> 19 20 #include <linux/compiler.h> 20 21 21 22 /* ··· 89 88 * It also implies a memory barrier. 90 89 */ 91 90 92 - extern inline int test_and_set_bit(int nr, void *addr) 91 + extern inline int test_and_set_bit(int nr, volatile unsigned long *addr) 93 92 { 94 93 unsigned int mask, retval; 95 94 unsigned long flags; ··· 97 96 98 97 adr += nr >> 5; 99 98 mask = 1 << (nr & 0x1f); 100 - local_save_flags(flags); 101 - local_irq_disable(); 99 + cris_atomic_save(addr, flags); 102 100 retval = (mask & *adr) != 0; 103 101 *adr |= mask; 102 + cris_atomic_restore(addr, flags); 104 103 local_irq_restore(flags); 105 104 return retval; 106 105 } 107 106 108 - extern inline int __test_and_set_bit(int nr, void *addr) 107 + extern inline int __test_and_set_bit(int nr, volatile unsigned long *addr) 109 108 { 110 109 unsigned int mask, retval; 111 110 unsigned int *adr = (unsigned int *)addr; ··· 132 131 * It also implies a memory barrier. 133 132 */ 134 133 135 - extern inline int test_and_clear_bit(int nr, void *addr) 134 + extern inline int test_and_clear_bit(int nr, volatile unsigned long *addr) 136 135 { 137 136 unsigned int mask, retval; 138 137 unsigned long flags; ··· 140 139 141 140 adr += nr >> 5; 142 141 mask = 1 << (nr & 0x1f); 143 - local_save_flags(flags); 144 - local_irq_disable(); 142 + cris_atomic_save(addr, flags); 145 143 retval = (mask & *adr) != 0; 146 144 *adr &= ~mask; 147 - local_irq_restore(flags); 145 + cris_atomic_restore(addr, flags); 148 146 return retval; 149 147 } 150 148 ··· 157 157 * but actually fail. You must protect multiple accesses with a lock. 
158 158 */ 159 159 160 - extern inline int __test_and_clear_bit(int nr, void *addr) 160 + extern inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) 161 161 { 162 162 unsigned int mask, retval; 163 163 unsigned int *adr = (unsigned int *)addr; ··· 177 177 * It also implies a memory barrier. 178 178 */ 179 179 180 - extern inline int test_and_change_bit(int nr, void *addr) 180 + extern inline int test_and_change_bit(int nr, volatile unsigned long *addr) 181 181 { 182 182 unsigned int mask, retval; 183 183 unsigned long flags; 184 184 unsigned int *adr = (unsigned int *)addr; 185 185 adr += nr >> 5; 186 186 mask = 1 << (nr & 0x1f); 187 - local_save_flags(flags); 188 - local_irq_disable(); 187 + cris_atomic_save(addr, flags); 189 188 retval = (mask & *adr) != 0; 190 189 *adr ^= mask; 191 - local_irq_restore(flags); 190 + cris_atomic_restore(addr, flags); 192 191 return retval; 193 192 } 194 193 195 194 /* WARNING: non atomic and it can be reordered! */ 196 195 197 - extern inline int __test_and_change_bit(int nr, void *addr) 196 + extern inline int __test_and_change_bit(int nr, volatile unsigned long *addr) 198 197 { 199 198 unsigned int mask, retval; 200 199 unsigned int *adr = (unsigned int *)addr; ··· 214 215 * This routine doesn't need to be atomic. 
215 216 */ 216 217 217 - extern inline int test_bit(int nr, const void *addr) 218 + extern inline int test_bit(int nr, const volatile unsigned long *addr) 218 219 { 219 220 unsigned int mask; 220 221 unsigned int *adr = (unsigned int *)addr; ··· 258 259 * @offset: The bitnumber to start searching at 259 260 * @size: The maximum size to search 260 261 */ 261 - extern inline int find_next_zero_bit (void * addr, int size, int offset) 262 + extern inline int find_next_zero_bit (const unsigned long * addr, int size, int offset) 262 263 { 263 264 unsigned long *p = ((unsigned long *) addr) + (offset >> 5); 264 265 unsigned long result = offset & ~31UL; ··· 300 301 * @offset: The bitnumber to start searching at 301 302 * @size: The maximum size to search 302 303 */ 303 - static __inline__ int find_next_bit(void *addr, int size, int offset) 304 + static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset) 304 305 { 305 306 unsigned long *p = ((unsigned long *) addr) + (offset >> 5); 306 307 unsigned long result = offset & ~31UL; ··· 366 367 #define minix_test_bit(nr,addr) test_bit(nr,addr) 367 368 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) 368 369 369 - extern inline int sched_find_first_bit(unsigned long *b) 370 + extern inline int sched_find_first_bit(const unsigned long *b) 370 371 { 371 372 if (unlikely(b[0])) 372 373 return __ffs(b[0]);
+2 -2
include/asm-cris/kmap_types.h
··· 17 17 KM_PTE1, 18 18 KM_IRQ0, 19 19 KM_IRQ1, 20 - KM_CRYPTO_USER, 21 - KM_CRYPTO_SOFTIRQ, 20 + KM_SOFTIRQ0, 21 + KM_SOFTIRQ1, 22 22 KM_TYPE_NR 23 23 }; 24 24
-7
include/asm-cris/page.h
··· 29 29 */ 30 30 #ifndef __ASSEMBLY__ 31 31 typedef struct { unsigned long pte; } pte_t; 32 - typedef struct { unsigned long pmd; } pmd_t; 33 32 typedef struct { unsigned long pgd; } pgd_t; 34 33 typedef struct { unsigned long pgprot; } pgprot_t; 35 34 #endif 36 35 37 36 #define pte_val(x) ((x).pte) 38 - #define pmd_val(x) ((x).pmd) 39 37 #define pgd_val(x) ((x).pgd) 40 38 #define pgprot_val(x) ((x).pgprot) 41 39 42 40 #define __pte(x) ((pte_t) { (x) } ) 43 - #define __pmd(x) ((pmd_t) { (x) } ) 44 41 #define __pgd(x) ((pgd_t) { (x) } ) 45 42 #define __pgprot(x) ((pgprot_t) { (x) } ) 46 43 ··· 69 72 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) 70 73 71 74 #ifndef __ASSEMBLY__ 72 - 73 - #define BUG() do { \ 74 - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ 75 - } while (0) 76 75 77 76 /* Pure 2^n version of get_order */ 78 77 static inline int get_order(unsigned long size)
-10
include/asm-cris/pgalloc.h
··· 47 47 48 48 #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 49 49 50 - /* 51 - * We don't have any real pmd's, and this code never triggers because 52 - * the pgd will always be present.. 53 - */ 54 - 55 - #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) 56 - #define pmd_free(x) do { } while (0) 57 - #define __pmd_free_tlb(tlb,x) do { } while (0) 58 - #define pgd_populate(mm, pmd, pte) BUG() 59 - 60 50 #define check_pgt_cache() do { } while (0) 61 51 62 52 #endif
+9 -33
include/asm-cris/pgtable.h
··· 5 5 #ifndef _CRIS_PGTABLE_H 6 6 #define _CRIS_PGTABLE_H 7 7 8 - #include <asm-generic/4level-fixup.h> 8 + #include <asm/page.h> 9 + #include <asm-generic/pgtable-nopmd.h> 9 10 10 11 #ifndef __ASSEMBLY__ 11 12 #include <linux/config.h> ··· 42 41 * but the define is needed for a generic inline function.) 43 42 */ 44 43 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) 45 - #define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval) 44 + #define set_pgu(pudptr, pudval) (*(pudptr) = pudval) 46 45 47 - /* PMD_SHIFT determines the size of the area a second-level page table can 46 + /* PGDIR_SHIFT determines the size of the area a second-level page table can 48 47 * map. It is equal to the page size times the number of PTE's that fit in 49 48 * a PMD page. A PTE is 4-bytes in CRIS. Hence the following number. 50 49 */ 51 50 52 - #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2)) 53 - #define PMD_SIZE (1UL << PMD_SHIFT) 54 - #define PMD_MASK (~(PMD_SIZE-1)) 55 - 56 - /* PGDIR_SHIFT determines what a third-level page table entry can map. 57 - * Since we fold into a two-level structure, this is the same as PMD_SHIFT. 58 - */ 59 - 60 - #define PGDIR_SHIFT PMD_SHIFT 51 + #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2)) 61 52 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 62 53 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 63 54 ··· 60 67 * divide it by 4 (shift by 2). 61 68 */ 62 69 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2)) 63 - #define PTRS_PER_PMD 1 64 70 #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2)) 65 71 66 72 /* calculate how many PGD entries a user-level program can use ··· 97 105 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 98 106 #define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) 99 107 100 - #define pmd_none(x) (!pmd_val(x)) 108 + #define pmd_none(x) (!pmd_val(x)) 101 109 /* by removing the _PAGE_KERNEL bit from the comparision, the same pmd_bad 102 110 * works for both _PAGE_TABLE and _KERNPG_TABLE pmd entries. 
103 111 */ ··· 106 114 #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) 107 115 108 116 #ifndef __ASSEMBLY__ 109 - 110 - /* 111 - * The "pgd_xxx()" functions here are trivial for a folded two-level 112 - * setup: the pgd is never bad, and a pmd always exists (as it's folded 113 - * into the pgd entry) 114 - */ 115 - extern inline int pgd_none(pgd_t pgd) { return 0; } 116 - extern inline int pgd_bad(pgd_t pgd) { return 0; } 117 - extern inline int pgd_present(pgd_t pgd) { return 1; } 118 - extern inline void pgd_clear(pgd_t * pgdp) { } 119 117 120 118 /* 121 119 * The following only work if pte_present() is true. ··· 257 275 #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) 258 276 259 277 /* to find an entry in a page-table-directory. */ 260 - #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 278 + #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 261 279 262 280 /* to find an entry in a page-table-directory */ 263 281 extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address) ··· 267 285 268 286 /* to find an entry in a kernel page-table-directory */ 269 287 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 270 - 271 - /* Find an entry in the second-level page table.. */ 272 - extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) 273 - { 274 - return (pmd_t *) dir; 275 - } 276 288 277 289 /* Find an entry in the third-level page table.. 
*/ 278 290 #define __pte_offset(address) \ ··· 284 308 285 309 #define pte_ERROR(e) \ 286 310 printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) 287 - #define pmd_ERROR(e) \ 288 - printk("%s:%d: bad pmd %p(%08lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) 289 311 #define pgd_ERROR(e) \ 290 312 printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) 291 313 ··· 321 347 322 348 #define pte_to_pgoff(x) (pte_val(x) >> 6) 323 349 #define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE) 350 + 351 + typedef pte_t *pte_addr_t; 324 352 325 353 #endif /* __ASSEMBLY__ */ 326 354 #endif /* _CRIS_PGTABLE_H */
-9
include/asm-cris/processor.h
··· 55 55 56 56 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) 57 57 58 - /* 59 - * Free current thread data structures etc.. 60 - */ 61 - 62 - extern inline void exit_thread(void) 63 - { 64 - /* Nothing needs to be done. */ 65 - } 66 - 67 58 extern unsigned long thread_saved_pc(struct task_struct *tsk); 68 59 69 60 /* Free all resources held by a thread. */
+1 -1
include/asm-cris/thread_info.h
··· 43 43 44 44 #endif 45 45 46 - #define PREEMPT_ACTIVE 0x4000000 46 + #define PREEMPT_ACTIVE 0x10000000 47 47 48 48 /* 49 49 * macros/functions for gaining access to the thread information structure
+1 -1
include/asm-cris/timex.h
··· 14 14 * used so it does not matter. 15 15 */ 16 16 17 - typedef unsigned int cycles_t; 17 + typedef unsigned long long cycles_t; 18 18 19 19 extern inline cycles_t get_cycles(void) 20 20 {
+1 -1
include/asm-cris/types.h
··· 52 52 typedef u32 dma_addr_t; 53 53 typedef u32 dma64_addr_t; 54 54 55 - typedef unsigned int kmem_bufctl_t; 55 + typedef unsigned short kmem_bufctl_t; 56 56 57 57 #endif /* __ASSEMBLY__ */ 58 58
+9 -2
include/asm-cris/unistd.h
··· 288 288 #define __NR_mq_timedreceive (__NR_mq_open+3) 289 289 #define __NR_mq_notify (__NR_mq_open+4) 290 290 #define __NR_mq_getsetattr (__NR_mq_open+5) 291 - 292 - #define NR_syscalls 283 291 + #define __NR_sys_kexec_load 283 292 + #define __NR_waitid 284 293 + /* #define __NR_sys_setaltroot 285 */ 294 + #define __NR_add_key 286 295 + #define __NR_request_key 287 296 + #define __NR_keyctl 288 297 + 298 + #define NR_syscalls 289 299 + 293 300 294 301 295 302 #ifdef __KERNEL__