Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.25 946 lines 25 kB view raw
/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@cam.org>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/elf.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache
 */
#define MAX_AREA_SIZE	32768

/*
 * the cache line size of the I and D cache
 */
#define CACHELINESIZE	32

/*
 * the size of the data cache
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For instance we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
 * the 2 areas in alternance each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing. Some bootloaders disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xscale_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point. We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vma	- vma_area_struct describing address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(xscale_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

ENTRY(xscale_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_inv_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 * http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_flush_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

#define PTE_CACHE_WRITE_ALLOCATE 0

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	str	r1, [r0], #-2048		@ linux version

	bic	r2, r1, #0xff0
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

	@
	@ Handle the X bit.  We want to set this bit for the minicache
	@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
	@ and we have a writeable, cacheable region.  If we ignore the
	@ U and E bits, we can allow user space to use the minicache as
	@ well.
	@
	@  X = (C & ~W & ~B) | (C & W & B & write_allocate)
	@
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#if PTE_CACHE_WRITE_ALLOCATE
	eorne	ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
	tstne	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
	orreq	r2, r2, #PTE_EXT_TEX(1)

	@
	@ Erratum 40: The B bit must be cleared for a user read-only
	@ cacheable page.
	@
	@  B = B & ~(U & C & ~W)
	@
	and	ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
	teq	ip, #L_PTE_USER | L_PTE_CACHEABLE
	biceq	r2, r2, #PTE_BUFFERABLE

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr


	.ltorg

	.align

	__INIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ Its undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 *
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte_ext
	.size	xscale_processor_functions, . - xscale_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
	.asciz	"XScale-80200 A0/A1"
	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name

	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_80219_name, #object
cpu_80219_name:
	.asciz	"XScale-80219"
	.size	cpu_80219_name, . - cpu_80219_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp43x_name, #object
cpu_ixp43x_name:
	.asciz	"XScale-IXP43x Family"
	.size	cpu_ixp43x_name, . - cpu_ixp43x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__80200_A0_A1_proc_info,#object
__80200_A0_A1_proc_info:
	.long	0x69052000
	.long	0xfffffffe
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_80200_A0_A1_cache_fns
	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info

	.type	__80219_proc_info,#object
__80219_proc_info:
	.long	0x69052e20
	.long	0xffffffe0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80219_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80219_proc_info, . - __80219_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420
	.long	0xfffff7e0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010
	.long	0xfffffd30
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info

	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100
	.long	0xfffff7f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120
	.long	0xfffff3f0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info

	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0
	.long	0xffffffc0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp43x_proc_info, #object
__ixp43x_proc_info:
	.long	0x69054040
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp43x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp43x_proc_info, . - __ixp43x_proc_info

	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200
	.long	0xffffff00
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info