Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'parisc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
"Lots of small fixes and enhancements, most notably:

- Many TLB and cache flush optimizations (Dave)

- Fixed HPMC/crash handler on 64-bit kernel (Dave and myself)

 - Added alternative infrastructure. The kernel now live-patches itself
for various situations, e.g. replace SMP code when running on one
CPU only or drop cache flushes when system has no cache installed.

- vmlinuz now contains a full copy of the compressed vmlinux file.
This simplifies debugging the currently booted kernel.

- Unused driver removal (Christoph)

- Reduced warnings of Dino PCI bridge when running in qemu

- Removed gcc version check (Masahiro)"

* 'parisc-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (23 commits)
parisc: Retrieve and display the PDC PAT capabilities
parisc: Optimze cache flush algorithms
parisc: Remove pte_inserted define
parisc: Add PDC PAT cell_info() and pd_get_pdc_revisions() functions
parisc: Drop two instructions from pte lookup code
parisc: Use zdep for shlw macro on PA1.1 and PA2.0
parisc: Add alternative coding infrastructure
parisc: Include compressed vmlinux file in vmlinuz boot kernel
extract-vmlinux: Check for uncompressed image as fallback
parisc: Fix address in HPMC IVA
parisc: Fix exported address of os_hpmc handler
parisc: Fix map_pages() to not overwrite existing pte entries
parisc: Purge TLB entries after updating page table entry and set page accessed flag in TLB handler
parisc: Release spinlocks using ordered store
parisc: Ratelimit dino stuck interrupt warnings
parisc: dino: Utilize DINO_MASK_IRQ() macro
parisc: Clean up crash header output
parisc: Add SYSTEM_INFO and REGISTER TOC PAT functions
parisc: Remove PTE load and fault check from L2_ptep macro
parisc: Reorder TLB flush timing calculation
...

+736 -429
-9
arch/parisc/Makefile
··· 156 156 @echo ' copy to $$(INSTALL_PATH)' 157 157 @echo ' zinstall - Install compressed vmlinuz kernel' 158 158 endef 159 - 160 - # we require gcc 3.3 or above to compile the kernel 161 - archprepare: checkbin 162 - checkbin: 163 - @if test "$(cc-version)" -lt "0303"; then \ 164 - echo -n "Sorry, GCC v3.3 or above is required to build " ; \ 165 - echo "the kernel." ; \ 166 - false ; \ 167 - fi
+1 -3
arch/parisc/boot/compressed/Makefile
··· 14 14 15 15 KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER 16 16 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 17 - KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks 17 + KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -fno-builtin-printf 18 18 KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os 19 19 ifndef CONFIG_64BIT 20 20 KBUILD_CFLAGS += -mfast-indirect-calls ··· 22 22 23 23 OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o 24 24 25 - # LDFLAGS_vmlinux := -X --whole-archive -e startup -T 26 25 LDFLAGS_vmlinux := -X -e startup --as-needed -T 27 26 $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC) 28 27 $(call if_changed,ld) ··· 54 55 CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER 55 56 $(obj)/vmlinux.lds: $(obj)/sizes.h 56 57 57 - OBJCOPYFLAGS_vmlinux.bin := -O binary -R .comment -S 58 58 $(obj)/vmlinux.bin: vmlinux 59 59 $(call if_changed,objcopy) 60 60
+77 -18
arch/parisc/boot/compressed/misc.c
··· 5 5 */ 6 6 7 7 #include <linux/uaccess.h> 8 + #include <linux/elf.h> 8 9 #include <asm/unaligned.h> 9 10 #include <asm/page.h> 10 11 #include "sizes.h" ··· 228 227 asm ("sync"); 229 228 } 230 229 230 + static void parse_elf(void *output) 231 + { 232 + #ifdef CONFIG_64BIT 233 + Elf64_Ehdr ehdr; 234 + Elf64_Phdr *phdrs, *phdr; 235 + #else 236 + Elf32_Ehdr ehdr; 237 + Elf32_Phdr *phdrs, *phdr; 238 + #endif 239 + void *dest; 240 + int i; 241 + 242 + memcpy(&ehdr, output, sizeof(ehdr)); 243 + if (ehdr.e_ident[EI_MAG0] != ELFMAG0 || 244 + ehdr.e_ident[EI_MAG1] != ELFMAG1 || 245 + ehdr.e_ident[EI_MAG2] != ELFMAG2 || 246 + ehdr.e_ident[EI_MAG3] != ELFMAG3) { 247 + error("Kernel is not a valid ELF file"); 248 + return; 249 + } 250 + 251 + #ifdef DEBUG 252 + printf("Parsing ELF... "); 253 + #endif 254 + 255 + phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum); 256 + if (!phdrs) 257 + error("Failed to allocate space for phdrs"); 258 + 259 + memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum); 260 + 261 + for (i = 0; i < ehdr.e_phnum; i++) { 262 + phdr = &phdrs[i]; 263 + 264 + switch (phdr->p_type) { 265 + case PT_LOAD: 266 + dest = (void *)((unsigned long) phdr->p_paddr & 267 + (__PAGE_OFFSET_DEFAULT-1)); 268 + memmove(dest, output + phdr->p_offset, phdr->p_filesz); 269 + break; 270 + default: 271 + break; 272 + } 273 + } 274 + 275 + free(phdrs); 276 + } 277 + 231 278 unsigned long decompress_kernel(unsigned int started_wide, 232 279 unsigned int command_line, 233 280 const unsigned int rd_start, 234 281 const unsigned int rd_end) 235 282 { 236 283 char *output; 237 - unsigned long len, len_all; 284 + unsigned long vmlinux_addr, vmlinux_len; 285 + unsigned long kernel_addr, kernel_len; 238 286 239 287 #ifdef CONFIG_64BIT 240 288 parisc_narrow_firmware = 0; ··· 291 241 292 242 set_firmware_width_unlocked(); 293 243 294 - putchar('U'); /* if you get this p and no more, string storage */ 244 + putchar('D'); /* if you get this D and no more, string storage */ 295 
245 /* in $GLOBAL$ is wrong or %dp is wrong */ 296 - puts("ncompressing ...\n"); 246 + puts("ecompressing Linux... "); 297 247 298 - output = (char *) KERNEL_BINARY_TEXT_START; 299 - len_all = __pa(SZ_end) - __pa(SZparisc_kernel_start); 300 - 301 - if ((unsigned long) &_startcode_end > (unsigned long) output) 248 + /* where the final bits are stored */ 249 + kernel_addr = KERNEL_BINARY_TEXT_START; 250 + kernel_len = __pa(SZ_end) - __pa(SZparisc_kernel_start); 251 + if ((unsigned long) &_startcode_end > kernel_addr) 302 252 error("Bootcode overlaps kernel code"); 303 253 304 - len = get_unaligned_le32(&output_len); 305 - if (len > len_all) 306 - error("Output len too big."); 307 - else 308 - memset(&output[len], 0, len_all - len); 254 + /* 255 + * Calculate addr to where the vmlinux ELF file shall be decompressed. 256 + * Assembly code in head.S positioned the stack directly behind bss, so 257 + * leave 2 MB for the stack. 258 + */ 259 + vmlinux_addr = (unsigned long) &_ebss + 2*1024*1024; 260 + vmlinux_len = get_unaligned_le32(&output_len); 261 + output = (char *) vmlinux_addr; 309 262 310 263 /* 311 264 * Initialize free_mem_ptr and free_mem_end_ptr. 
312 265 */ 313 - free_mem_ptr = (unsigned long) &_ebss; 314 - free_mem_ptr += 2*1024*1024; /* leave 2 MB for stack */ 266 + free_mem_ptr = vmlinux_addr + vmlinux_len; 315 267 316 268 /* Limit memory for bootoader to 1GB */ 317 269 #define ARTIFICIAL_LIMIT (1*1024*1024*1024) ··· 327 275 free_mem_end_ptr = rd_start; 328 276 #endif 329 277 278 + if (free_mem_ptr >= free_mem_end_ptr) 279 + error("Kernel too big for machine."); 280 + 330 281 #ifdef DEBUG 282 + printf("\n"); 331 283 printf("startcode_end = %x\n", &_startcode_end); 332 284 printf("commandline = %x\n", command_line); 333 285 printf("rd_start = %x\n", rd_start); ··· 343 287 printf("input_data = %x\n", input_data); 344 288 printf("input_len = %x\n", input_len); 345 289 printf("output = %x\n", output); 346 - printf("output_len = %x\n", len); 347 - printf("output_max = %x\n", len_all); 290 + printf("output_len = %x\n", vmlinux_len); 291 + printf("kernel_addr = %x\n", kernel_addr); 292 + printf("kernel_len = %x\n", kernel_len); 348 293 #endif 349 294 350 295 __decompress(input_data, input_len, NULL, NULL, 351 296 output, 0, NULL, error); 297 + parse_elf(output); 352 298 353 - flush_data_cache(output, len); 299 + output = (char *) kernel_addr; 300 + flush_data_cache(output, kernel_len); 354 301 355 - printf("Booting kernel ...\n\n"); 302 + printf("done.\nBooting the kernel.\n"); 356 303 357 304 return (unsigned long) output; 358 305 }
+6 -4
arch/parisc/boot/compressed/vmlinux.lds.S
··· 42 42 #endif 43 43 _startcode_end = .; 44 44 45 + /* vmlinux.bin.gz is here */ 46 + . = ALIGN(8); 47 + .rodata.compressed : { 48 + *(.rodata.compressed) 49 + } 50 + 45 51 /* bootloader code and data starts behind area of extracted kernel */ 46 52 . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START); 47 53 ··· 72 66 *(.rodata) /* read-only data */ 73 67 *(.rodata.*) 74 68 _erodata = . ; 75 - } 76 - . = ALIGN(8); 77 - .rodata.compressed : { 78 - *(.rodata.compressed) 79 69 } 80 70 . = ALIGN(8); 81 71 .bss : {
+47
arch/parisc/include/asm/alternative.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_PARISC_ALTERNATIVE_H 3 + #define __ASM_PARISC_ALTERNATIVE_H 4 + 5 + #define ALT_COND_NO_SMP 0x01 /* when running UP instead of SMP */ 6 + #define ALT_COND_NO_DCACHE 0x02 /* if system has no d-cache */ 7 + #define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */ 8 + #define ALT_COND_NO_SPLIT_TLB 0x08 /* if split_tlb == 0 */ 9 + #define ALT_COND_NO_IOC_FDC 0x10 /* if I/O cache does not need flushes */ 10 + 11 + #define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */ 12 + #define INSN_NOP 0x08000240 /* nop */ 13 + 14 + #ifndef __ASSEMBLY__ 15 + 16 + #include <linux/init.h> 17 + #include <linux/types.h> 18 + #include <linux/stddef.h> 19 + #include <linux/stringify.h> 20 + 21 + struct alt_instr { 22 + s32 orig_offset; /* offset to original instructions */ 23 + u32 len; /* end of original instructions */ 24 + u32 cond; /* see ALT_COND_XXX */ 25 + u32 replacement; /* replacement instruction or code */ 26 + }; 27 + 28 + void set_kernel_text_rw(int enable_read_write); 29 + 30 + /* Alternative SMP implementation. */ 31 + #define ALTERNATIVE(cond, replacement) "!0:" \ 32 + ".section .altinstructions, \"aw\" !" \ 33 + ".word (0b-4-.), 1, " __stringify(cond) "," \ 34 + __stringify(replacement) " !" \ 35 + ".previous" 36 + 37 + #else 38 + 39 + #define ALTERNATIVE(from, to, cond, replacement)\ 40 + .section .altinstructions, "aw" ! \ 41 + .word (from - .), (to - from)/4 ! \ 42 + .word cond, replacement ! \ 43 + .previous 44 + 45 + #endif /* __ASSEMBLY__ */ 46 + 47 + #endif /* __ASM_PARISC_ALTERNATIVE_H */
+1 -8
arch/parisc/include/asm/assembly.h
··· 129 129 .macro debug value 130 130 .endm 131 131 132 - 133 - /* Shift Left - note the r and t can NOT be the same! */ 134 - .macro shl r, sa, t 135 - dep,z \r, 31-(\sa), 32-(\sa), \t 136 - .endm 137 - 138 - /* The PA 2.0 shift left */ 139 132 .macro shlw r, sa, t 140 - depw,z \r, 31-(\sa), 32-(\sa), \t 133 + zdep \r, 31-(\sa), 32-(\sa), \t 141 134 .endm 142 135 143 136 /* And the PA 2.0W shift left */
+19 -3
arch/parisc/include/asm/cache.h
··· 6 6 #ifndef __ARCH_PARISC_CACHE_H 7 7 #define __ARCH_PARISC_CACHE_H 8 8 9 + #include <asm/alternative.h> 9 10 10 11 /* 11 12 * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors ··· 42 41 extern struct pdc_cache_info cache_info; 43 42 void parisc_setup_cache_timing(void); 44 43 45 - #define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr)); 46 - #define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr)); 47 - #define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr)); 44 + #define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \ 45 + ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ 46 + : : "r" (addr)) 47 + #define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \ 48 + ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ 49 + ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \ 50 + : : "r" (addr)) 51 + #define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \ 52 + ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ 53 + : : "r" (addr)) 54 + 55 + #define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \ 56 + ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ 57 + ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \ 58 + : : "r" (addr)) 59 + #define asm_io_sync() asm volatile("sync" \ 60 + ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ 61 + ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: ) 48 62 49 63 #endif /* ! __ASSEMBLY__ */ 50 64
+7 -5
arch/parisc/include/asm/page.h
··· 117 117 /* This governs the relationship between virtual and physical addresses. 118 118 * If you alter it, make sure to take care of our various fixed mapping 119 119 * segments in fixmap.h */ 120 - #if defined(BOOTLOADER) 121 - #define __PAGE_OFFSET (0) /* bootloader uses physical addresses */ 122 - #else 123 120 #ifdef CONFIG_64BIT 124 - #define __PAGE_OFFSET (0x40000000) /* 1GB */ 121 + #define __PAGE_OFFSET_DEFAULT (0x40000000) /* 1GB */ 125 122 #else 126 - #define __PAGE_OFFSET (0x10000000) /* 256MB */ 123 + #define __PAGE_OFFSET_DEFAULT (0x10000000) /* 256MB */ 127 124 #endif 125 + 126 + #if defined(BOOTLOADER) 127 + #define __PAGE_OFFSET (0) /* bootloader uses physical addresses */ 128 + #else 129 + #define __PAGE_OFFSET __PAGE_OFFSET_DEFAULT 128 130 #endif /* BOOTLOADER */ 129 131 130 132 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+1
arch/parisc/include/asm/pdc.h
··· 11 11 extern int pdc_type; 12 12 extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */ 13 13 extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT) */ 14 + extern unsigned long parisc_pat_pdc_cap; /* PDC capabilities (PAT) */ 14 15 15 16 /* Values for pdc_type */ 16 17 #define PDC_TYPE_ILLEGAL -1
+49 -13
arch/parisc/include/asm/pdcpat.h
··· 173 173 /* PDC PAT PD */ 174 174 #define PDC_PAT_PD 74L /* Protection Domain Info */ 175 175 #define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */ 176 + #define PDC_PAT_PD_GET_PDC_INTERF_REV 1L /* Get PDC Interface Revisions */ 177 + 178 + #define PDC_PAT_CAPABILITY_BIT_PDC_SERIALIZE (1UL << 0) 179 + #define PDC_PAT_CAPABILITY_BIT_PDC_POLLING (1UL << 1) 180 + #define PDC_PAT_CAPABILITY_BIT_PDC_NBC (1UL << 2) /* non-blocking calls */ 181 + #define PDC_PAT_CAPABILITY_BIT_PDC_UFO (1UL << 3) 182 + #define PDC_PAT_CAPABILITY_BIT_PDC_IODC_32 (1UL << 4) 183 + #define PDC_PAT_CAPABILITY_BIT_PDC_IODC_64 (1UL << 5) 184 + #define PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ (1UL << 6) 185 + #define PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB (1UL << 7) 176 186 177 187 /* PDC_PAT_PD_GET_ADDR_MAP entry types */ 178 188 #define PAT_MEMORY_DESCRIPTOR 1 ··· 196 186 #define PAT_MEMUSE_GI 128 197 187 #define PAT_MEMUSE_GNI 129 198 188 189 + /* PDC PAT REGISTER TOC */ 190 + #define PDC_PAT_REGISTER_TOC 75L 191 + #define PDC_PAT_TOC_REGISTER_VECTOR 0L /* Register TOC Vector */ 192 + #define PDC_PAT_TOC_READ_VECTOR 1L /* Read TOC Vector */ 193 + 194 + /* PDC PAT SYSTEM_INFO */ 195 + #define PDC_PAT_SYSTEM_INFO 76L 196 + /* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. 
*/ 199 197 200 198 #ifndef __ASSEMBLY__ 201 199 #include <linux/types.h> ··· 315 297 ** PDC_PAT_CELL_GET_INFO return block 316 298 */ 317 299 typedef struct pdc_pat_cell_info_rtn_block { 318 - unsigned long cpu_info; 319 - unsigned long cell_info; 320 - unsigned long cell_location; 321 - unsigned long reo_location; 322 - unsigned long mem_size; 323 - unsigned long dimm_status; 324 300 unsigned long pdc_rev; 325 - unsigned long fabric_info0; 326 - unsigned long fabric_info1; 327 - unsigned long fabric_info2; 328 - unsigned long fabric_info3; 329 - unsigned long reserved[21]; 301 + unsigned long capabilities; /* see PDC_PAT_CAPABILITY_BIT_* */ 302 + unsigned long reserved0[2]; 303 + unsigned long cell_info; /* 0x20 */ 304 + unsigned long cell_phys_location; 305 + unsigned long cpu_info; 306 + unsigned long cpu_speed; 307 + unsigned long io_chassis_phys_location; 308 + unsigned long cell_io_information; 309 + unsigned long reserved1[2]; 310 + unsigned long io_slot_info_size; /* 0x60 */ 311 + struct { 312 + unsigned long header, info0, info1; 313 + unsigned long phys_loc, hw_path; 314 + } io_slot[16]; 315 + unsigned long cell_mem_size; /* 0x2e8 */ 316 + unsigned long cell_dimm_info_size; 317 + unsigned long dimm_info[16]; 318 + unsigned long fabric_info_size; /* 0x3f8 */ 319 + struct { /* 0x380 */ 320 + unsigned long fabric_info_xbc_port; 321 + unsigned long rc_attached_to_xbc; 322 + } xbc[8*4]; 330 323 } pdc_pat_cell_info_rtn_block_t; 331 324 332 325 ··· 355 326 356 327 extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data); 357 328 extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info); 358 - extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr); 329 + extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info, 330 + unsigned long *actcnt, unsigned long offset, 331 + unsigned long cell_number); 332 + extern int pdc_pat_cell_module(unsigned 
long *actcnt, unsigned long ploc, 333 + unsigned long mod, unsigned long view_type, void *mem_addr); 359 334 extern int pdc_pat_cell_num_to_loc(void *, unsigned long); 360 335 361 336 extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa); 362 337 363 - extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset); 338 + extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, 339 + unsigned long count, unsigned long offset); 340 + extern int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev, 341 + unsigned long *pat_rev, unsigned long *pdc_cap); 364 342 365 343 extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val); 366 344 extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
+13 -20
arch/parisc/include/asm/pgtable.h
··· 43 43 { 44 44 mtsp(mm->context, 1); 45 45 pdtlb(addr); 46 - if (unlikely(split_tlb)) 47 - pitlb(addr); 46 + pitlb(addr); 48 47 } 49 48 50 49 /* Certain architectures need to do special things when PTEs ··· 55 56 *(pteptr) = (pteval); \ 56 57 } while(0) 57 58 58 - #define pte_inserted(x) \ 59 - ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \ 60 - == (_PAGE_PRESENT|_PAGE_ACCESSED)) 61 - 62 59 #define set_pte_at(mm, addr, ptep, pteval) \ 63 60 do { \ 64 61 pte_t old_pte; \ 65 62 unsigned long flags; \ 66 63 spin_lock_irqsave(&pa_tlb_lock, flags); \ 67 64 old_pte = *ptep; \ 68 - if (pte_inserted(old_pte)) \ 69 - purge_tlb_entries(mm, addr); \ 70 65 set_pte(ptep, pteval); \ 66 + purge_tlb_entries(mm, addr); \ 71 67 spin_unlock_irqrestore(&pa_tlb_lock, flags); \ 72 68 } while (0) 73 69 ··· 196 202 #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) 197 203 #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) 198 204 199 - #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) 205 + #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) 200 206 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 201 207 #define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED) 202 208 #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC) ··· 221 227 222 228 #ifndef __ASSEMBLY__ 223 229 224 - #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) 225 - #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) 230 + #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER) 231 + #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE) 226 232 /* Others seem to make this executable, I don't know if that's correct 227 233 or not. 
The stack is mapped this way though so this is necessary 228 234 in the short term - dhd@linuxcare.com, 2000-08-08 */ 229 - #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) 230 - #define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED) 231 - #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) 235 + #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ) 236 + #define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE) 237 + #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC) 232 238 #define PAGE_COPY PAGE_EXECREAD 233 - #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) 239 + #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) 234 240 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) 235 241 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) 236 242 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) 237 243 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) 238 244 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) 239 - #define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ) 245 + #define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ) 240 246 241 247 242 248 /* ··· 473 479 spin_unlock_irqrestore(&pa_tlb_lock, flags); 474 480 return 0; 475 481 } 476 - purge_tlb_entries(vma->vm_mm, addr); 477 482 set_pte(ptep, pte_mkold(pte)); 483 + purge_tlb_entries(vma->vm_mm, addr); 478 484 spin_unlock_irqrestore(&pa_tlb_lock, flags); 479 485 return 1; 480 486 } ··· 487 493 488 494 spin_lock_irqsave(&pa_tlb_lock, flags); 489 495 old_pte = *ptep; 490 - if (pte_inserted(old_pte)) 491 - purge_tlb_entries(mm, addr); 492 496 set_pte(ptep, __pte(0)); 497 + purge_tlb_entries(mm, addr); 493 498 
spin_unlock_irqrestore(&pa_tlb_lock, flags); 494 499 495 500 return old_pte; ··· 498 505 { 499 506 unsigned long flags; 500 507 spin_lock_irqsave(&pa_tlb_lock, flags); 501 - purge_tlb_entries(mm, addr); 502 508 set_pte(ptep, pte_wrprotect(*ptep)); 509 + purge_tlb_entries(mm, addr); 503 510 spin_unlock_irqrestore(&pa_tlb_lock, flags); 504 511 } 505 512
+2
arch/parisc/include/asm/sections.h
··· 5 5 /* nothing to see, move along */ 6 6 #include <asm-generic/sections.h> 7 7 8 + extern char __alt_instructions[], __alt_instructions_end[]; 9 + 8 10 #ifdef CONFIG_64BIT 9 11 10 12 #define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
+2 -2
arch/parisc/include/asm/spinlock.h
··· 37 37 volatile unsigned int *a; 38 38 39 39 a = __ldcw_align(x); 40 - mb(); 41 - *a = 1; 40 + /* Release with ordered store. */ 41 + __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory"); 42 42 } 43 43 44 44 static inline int arch_spin_trylock(arch_spinlock_t *x)
+1 -2
arch/parisc/include/asm/tlbflush.h
··· 85 85 purge_tlb_start(flags); 86 86 mtsp(sid, 1); 87 87 pdtlb(addr); 88 - if (unlikely(split_tlb)) 89 - pitlb(addr); 88 + pitlb(addr); 90 89 purge_tlb_end(flags); 91 90 } 92 91 #endif
+37 -26
arch/parisc/kernel/cache.c
··· 36 36 37 37 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); 38 38 EXPORT_SYMBOL(flush_dcache_page_asm); 39 + void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); 39 40 void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); 40 41 41 42 ··· 304 303 preempt_enable(); 305 304 } 306 305 306 + static inline void 307 + __purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, 308 + unsigned long physaddr) 309 + { 310 + preempt_disable(); 311 + purge_dcache_page_asm(physaddr, vmaddr); 312 + if (vma->vm_flags & VM_EXEC) 313 + flush_icache_page_asm(physaddr, vmaddr); 314 + preempt_enable(); 315 + } 316 + 307 317 void flush_dcache_page(struct page *page) 308 318 { 309 319 struct address_space *mapping = page_mapping_file(page); ··· 376 364 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 377 365 static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; 378 366 379 - #define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */ 367 + #define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */ 380 368 static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD; 381 369 382 370 void __init parisc_setup_cache_timing(void) ··· 416 404 goto set_tlb_threshold; 417 405 } 418 406 419 - alltime = mfctl(16); 420 - flush_tlb_all(); 421 - alltime = mfctl(16) - alltime; 422 - 423 407 size = 0; 424 408 start = (unsigned long) _text; 425 409 rangetime = mfctl(16); ··· 426 418 } 427 419 rangetime = mfctl(16) - rangetime; 428 420 429 - printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", 421 + alltime = mfctl(16); 422 + flush_tlb_all(); 423 + alltime = mfctl(16) - alltime; 424 + 425 + printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n", 430 426 alltime, size, rangetime); 431 427 432 - threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); 428 + threshold = 
PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime); 429 + printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n", 430 + threshold/1024); 433 431 434 432 set_tlb_threshold: 435 - if (threshold) 433 + if (threshold > parisc_tlb_flush_threshold) 436 434 parisc_tlb_flush_threshold = threshold; 437 435 printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", 438 436 parisc_tlb_flush_threshold/1024); ··· 491 477 /* Purge TLB entries for small ranges using the pdtlb and 492 478 pitlb instructions. These instructions execute locally 493 479 but cause a purge request to be broadcast to other TLBs. */ 494 - if (likely(!split_tlb)) { 495 - while (start < end) { 496 - purge_tlb_start(flags); 497 - mtsp(sid, 1); 498 - pdtlb(start); 499 - purge_tlb_end(flags); 500 - start += PAGE_SIZE; 501 - } 502 - return 0; 503 - } 504 - 505 - /* split TLB case */ 506 480 while (start < end) { 507 481 purge_tlb_start(flags); 508 482 mtsp(sid, 1); ··· 575 573 pfn = pte_pfn(*ptep); 576 574 if (!pfn_valid(pfn)) 577 575 continue; 578 - if (unlikely(mm->context)) 576 + if (unlikely(mm->context)) { 579 577 flush_tlb_page(vma, addr); 580 - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); 578 + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); 579 + } else { 580 + __purge_cache_page(vma, addr, PFN_PHYS(pfn)); 581 + } 581 582 } 582 583 } 583 584 } ··· 615 610 continue; 616 611 pfn = pte_pfn(*ptep); 617 612 if (pfn_valid(pfn)) { 618 - if (unlikely(vma->vm_mm->context)) 613 + if (unlikely(vma->vm_mm->context)) { 619 614 flush_tlb_page(vma, addr); 620 - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); 615 + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); 616 + } else { 617 + __purge_cache_page(vma, addr, PFN_PHYS(pfn)); 618 + } 621 619 } 622 620 } 623 621 } ··· 629 621 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) 630 622 { 631 623 if (pfn_valid(pfn)) { 632 - if (likely(vma->vm_mm->context)) 624 + if (likely(vma->vm_mm->context)) { 633 625 flush_tlb_page(vma, 
vmaddr); 634 - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 626 + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 627 + } else { 628 + __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 629 + } 635 630 } 636 631 } 637 632
+17 -17
arch/parisc/kernel/entry.S
··· 38 38 #include <asm/ldcw.h> 39 39 #include <asm/traps.h> 40 40 #include <asm/thread_info.h> 41 + #include <asm/alternative.h> 41 42 42 43 #include <linux/linkage.h> 43 44 ··· 187 186 bv,n 0(%r3) 188 187 nop 189 188 .word 0 /* checksum (will be patched) */ 190 - .word PA(os_hpmc) /* address of handler */ 189 + .word 0 /* address of handler */ 191 190 .word 0 /* length of handler */ 192 191 .endm 193 192 ··· 427 426 ldw,s \index(\pmd),\pmd 428 427 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault 429 428 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ 430 - copy \pmd,%r9 431 - SHLREG %r9,PxD_VALUE_SHIFT,\pmd 429 + SHLREG \pmd,PxD_VALUE_SHIFT,\pmd 432 430 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 433 431 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 434 432 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ 435 - LDREG %r0(\pmd),\pte 436 - bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 437 433 .endm 438 434 439 435 /* Look up PTE in a 3-Level scheme. ··· 446 448 .macro L3_ptep pgd,pte,index,va,fault 447 449 #if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */ 448 450 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 449 - copy %r0,\pte 450 451 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 451 452 ldw,s \index(\pgd),\pgd 452 453 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ··· 460 463 L2_ptep \pgd,\pte,\index,\va,\fault 461 464 .endm 462 465 463 - /* Acquire pa_tlb_lock lock and recheck page is still present. */ 466 + /* Acquire pa_tlb_lock lock and check page is present. 
*/ 464 467 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault 465 468 #ifdef CONFIG_SMP 466 - cmpib,COND(=),n 0,\spc,2f 469 + 98: cmpib,COND(=),n 0,\spc,2f 467 470 load_pa_tlb_lock \tmp 468 471 1: LDCW 0(\tmp),\tmp1 469 472 cmpib,COND(=) 0,\tmp1,1b 470 473 nop 471 474 LDREG 0(\ptp),\pte 472 - bb,<,n \pte,_PAGE_PRESENT_BIT,2f 475 + bb,<,n \pte,_PAGE_PRESENT_BIT,3f 473 476 b \fault 474 - stw \spc,0(\tmp) 475 - 2: 477 + stw,ma \spc,0(\tmp) 478 + 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 476 479 #endif 480 + 2: LDREG 0(\ptp),\pte 481 + bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 482 + 3: 477 483 .endm 478 484 479 485 /* Release pa_tlb_lock lock without reloading lock address. */ 480 486 .macro tlb_unlock0 spc,tmp 481 487 #ifdef CONFIG_SMP 482 - or,COND(=) %r0,\spc,%r0 483 - sync 484 - or,COND(=) %r0,\spc,%r0 485 - stw \spc,0(\tmp) 488 + 98: or,COND(=) %r0,\spc,%r0 489 + stw,ma \spc,0(\tmp) 490 + 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 486 491 #endif 487 492 .endm 488 493 489 494 /* Release pa_tlb_lock lock. */ 490 495 .macro tlb_unlock1 spc,tmp 491 496 #ifdef CONFIG_SMP 492 - load_pa_tlb_lock \tmp 497 + 98: load_pa_tlb_lock \tmp 498 + 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 493 499 tlb_unlock0 \spc,\tmp 494 500 #endif 495 501 .endm ··· 1658 1658 1659 1659 itlb_fault: 1660 1660 b intr_save 1661 - ldi 6,%r8 1661 + ldi PARISC_ITLB_TRAP,%r8 1662 1662 1663 1663 nadtlb_fault: 1664 1664 b intr_save
+57
arch/parisc/kernel/firmware.c
··· 1326 1326 } 1327 1327 1328 1328 /** 1329 + * pdc_pat_cell_info - Retrieve the cell's information. 1330 + * @info: The pointer to a struct pdc_pat_cell_info_rtn_block. 1331 + * @actcnt: The number of bytes which should be written to info. 1332 + * @offset: offset of the structure. 1333 + * @cell_number: The cell number which should be asked, or -1 for current cell. 1334 + * 1335 + * This PDC call returns information about the given cell (or all cells). 1336 + */ 1337 + int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info, 1338 + unsigned long *actcnt, unsigned long offset, 1339 + unsigned long cell_number) 1340 + { 1341 + int retval; 1342 + unsigned long flags; 1343 + struct pdc_pat_cell_info_rtn_block result; 1344 + 1345 + spin_lock_irqsave(&pdc_lock, flags); 1346 + retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_INFO, 1347 + __pa(pdc_result), __pa(&result), *actcnt, 1348 + offset, cell_number); 1349 + if (!retval) { 1350 + *actcnt = pdc_result[0]; 1351 + memcpy(info, &result, *actcnt); 1352 + } 1353 + spin_unlock_irqrestore(&pdc_lock, flags); 1354 + 1355 + return retval; 1356 + } 1357 + 1358 + /** 1329 1359 * pdc_pat_cpu_get_number - Retrieve the cpu number. 1330 1360 * @cpu_info: The return buffer. 1331 1361 * @hpa: The Hard Physical Address of the CPU. ··· 1441 1411 1442 1412 return retval; 1443 1413 } 1414 + 1415 + /** 1416 + * pdc_pat_pd_get_PDC_interface_revisions - Retrieve PDC interface revisions. 1417 + * @legacy_rev: The legacy revision. 1418 + * @pat_rev: The PAT revision. 1419 + * @pdc_cap: The PDC capabilities. 
1420 + * 1421 + */ 1422 + int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev, 1423 + unsigned long *pat_rev, unsigned long *pdc_cap) 1424 + { 1425 + int retval; 1426 + unsigned long flags; 1427 + 1428 + spin_lock_irqsave(&pdc_lock, flags); 1429 + retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_PDC_INTERF_REV, 1430 + __pa(pdc_result)); 1431 + if (retval == PDC_OK) { 1432 + *legacy_rev = pdc_result[0]; 1433 + *pat_rev = pdc_result[1]; 1434 + *pdc_cap = pdc_result[2]; 1435 + } 1436 + spin_unlock_irqrestore(&pdc_lock, flags); 1437 + 1438 + return retval; 1439 + } 1440 + 1444 1441 1445 1442 /** 1446 1443 * pdc_pat_io_pci_cfg_read - Read PCI configuration space.
+1 -2
arch/parisc/kernel/hpmc.S
··· 85 85 86 86 .import intr_save, code 87 87 .align 16 88 - ENTRY_CFI(os_hpmc) 88 + ENTRY(os_hpmc) 89 89 .os_hpmc: 90 90 91 91 /* ··· 302 302 b . 303 303 nop 304 304 .align 16 /* make function length multiple of 16 bytes */ 305 - ENDPROC_CFI(os_hpmc) 306 305 .os_hpmc_end: 307 306 308 307
+10
arch/parisc/kernel/inventory.c
··· 43 43 /* cell number and location (PAT firmware only) */ 44 44 unsigned long parisc_cell_num __read_mostly; 45 45 unsigned long parisc_cell_loc __read_mostly; 46 + unsigned long parisc_pat_pdc_cap __read_mostly; 46 47 47 48 48 49 void __init setup_pdc(void) ··· 82 81 #ifdef CONFIG_64BIT 83 82 status = pdc_pat_cell_get_number(&cell_info); 84 83 if (status == PDC_OK) { 84 + unsigned long legacy_rev, pat_rev; 85 85 pdc_type = PDC_TYPE_PAT; 86 86 pr_cont("64 bit PAT.\n"); 87 87 parisc_cell_num = cell_info.cell_num; 88 88 parisc_cell_loc = cell_info.cell_loc; 89 89 pr_info("PAT: Running on cell %lu and location %lu.\n", 90 90 parisc_cell_num, parisc_cell_loc); 91 + status = pdc_pat_pd_get_pdc_revisions(&legacy_rev, 92 + &pat_rev, &parisc_pat_pdc_cap); 93 + pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n", 94 + legacy_rev, pat_rev, parisc_pat_pdc_cap, 95 + parisc_pat_pdc_cap 96 + & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0, 97 + parisc_pat_pdc_cap 98 + & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0); 91 99 return; 92 100 } 93 101 #endif
+261 -49
arch/parisc/kernel/pacache.S
··· 37 37 #include <asm/pgtable.h> 38 38 #include <asm/cache.h> 39 39 #include <asm/ldcw.h> 40 + #include <asm/alternative.h> 40 41 #include <linux/linkage.h> 41 42 #include <linux/init.h> 42 43 ··· 191 190 .import cache_info,data 192 191 193 192 ENTRY_CFI(flush_instruction_cache_local) 194 - load32 cache_info, %r1 193 + 88: load32 cache_info, %r1 195 194 196 195 /* Flush Instruction Cache */ 197 196 ··· 244 243 fisync: 245 244 sync 246 245 mtsm %r22 /* restore I-bit */ 246 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) 247 247 bv %r0(%r2) 248 248 nop 249 249 ENDPROC_CFI(flush_instruction_cache_local) ··· 252 250 253 251 .import cache_info, data 254 252 ENTRY_CFI(flush_data_cache_local) 255 - load32 cache_info, %r1 253 + 88: load32 cache_info, %r1 256 254 257 255 /* Flush Data Cache */ 258 256 ··· 306 304 syncdma 307 305 sync 308 306 mtsm %r22 /* restore I-bit */ 307 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 309 308 bv %r0(%r2) 310 309 nop 311 310 ENDPROC_CFI(flush_data_cache_local) ··· 315 312 316 313 .macro tlb_lock la,flags,tmp 317 314 #ifdef CONFIG_SMP 315 + 98: 318 316 #if __PA_LDCW_ALIGNMENT > 4 319 317 load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la 320 318 depi 0,31,__PA_LDCW_ALIGN_ORDER, \la ··· 330 326 nop 331 327 b,n 2b 332 328 3: 329 + 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 333 330 #endif 334 331 .endm 335 332 336 333 .macro tlb_unlock la,flags,tmp 337 334 #ifdef CONFIG_SMP 338 - ldi 1,\tmp 335 + 98: ldi 1,\tmp 339 336 sync 340 337 stw \tmp,0(\la) 341 338 mtsm \flags 339 + 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 342 340 #endif 343 341 .endm 344 342 ··· 602 596 pdtlb,l %r0(%r29) 603 597 #else 604 598 tlb_lock %r20,%r21,%r22 605 - pdtlb %r0(%r28) 606 - pdtlb %r0(%r29) 599 + 0: pdtlb %r0(%r28) 600 + 1: pdtlb %r0(%r29) 607 601 tlb_unlock %r20,%r21,%r22 602 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) 603 + ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB) 608 604 #endif 609 605 610 606 #ifdef 
CONFIG_64BIT ··· 744 736 pdtlb,l %r0(%r28) 745 737 #else 746 738 tlb_lock %r20,%r21,%r22 747 - pdtlb %r0(%r28) 739 + 0: pdtlb %r0(%r28) 748 740 tlb_unlock %r20,%r21,%r22 741 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) 749 742 #endif 750 743 751 744 #ifdef CONFIG_64BIT ··· 822 813 pdtlb,l %r0(%r28) 823 814 #else 824 815 tlb_lock %r20,%r21,%r22 825 - pdtlb %r0(%r28) 816 + 0: pdtlb %r0(%r28) 826 817 tlb_unlock %r20,%r21,%r22 818 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) 827 819 #endif 828 820 829 - ldil L%dcache_stride, %r1 821 + 88: ldil L%dcache_stride, %r1 830 822 ldw R%dcache_stride(%r1), r31 831 823 832 824 #ifdef CONFIG_64BIT ··· 838 828 add %r28, %r25, %r25 839 829 sub %r25, r31, %r25 840 830 831 + 1: fdc,m r31(%r28) 832 + fdc,m r31(%r28) 833 + fdc,m r31(%r28) 834 + fdc,m r31(%r28) 835 + fdc,m r31(%r28) 836 + fdc,m r31(%r28) 837 + fdc,m r31(%r28) 838 + fdc,m r31(%r28) 839 + fdc,m r31(%r28) 840 + fdc,m r31(%r28) 841 + fdc,m r31(%r28) 842 + fdc,m r31(%r28) 843 + fdc,m r31(%r28) 844 + fdc,m r31(%r28) 845 + fdc,m r31(%r28) 846 + cmpb,COND(>>) %r25, %r28, 1b /* predict taken */ 847 + fdc,m r31(%r28) 841 848 842 - 1: fdc,m r31(%r28) 843 - fdc,m r31(%r28) 844 - fdc,m r31(%r28) 845 - fdc,m r31(%r28) 846 - fdc,m r31(%r28) 847 - fdc,m r31(%r28) 848 - fdc,m r31(%r28) 849 - fdc,m r31(%r28) 850 - fdc,m r31(%r28) 851 - fdc,m r31(%r28) 852 - fdc,m r31(%r28) 853 - fdc,m r31(%r28) 854 - fdc,m r31(%r28) 855 - fdc,m r31(%r28) 856 - fdc,m r31(%r28) 857 - cmpb,COND(<<) %r28, %r25,1b 858 - fdc,m r31(%r28) 859 - 849 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 860 850 sync 861 851 bv %r0(%r2) 862 852 nop 863 853 ENDPROC_CFI(flush_dcache_page_asm) 854 + 855 + ENTRY_CFI(purge_dcache_page_asm) 856 + ldil L%(TMPALIAS_MAP_START), %r28 857 + #ifdef CONFIG_64BIT 858 + #if (TMPALIAS_MAP_START >= 0x80000000) 859 + depdi 0, 31,32, %r28 /* clear any sign extension */ 860 + #endif 861 + convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format 
*/ 862 + depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 863 + depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ 864 + #else 865 + extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 866 + depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 867 + depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ 868 + #endif 869 + 870 + /* Purge any old translation */ 871 + 872 + #ifdef CONFIG_PA20 873 + pdtlb,l %r0(%r28) 874 + #else 875 + tlb_lock %r20,%r21,%r22 876 + 0: pdtlb %r0(%r28) 877 + tlb_unlock %r20,%r21,%r22 878 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) 879 + #endif 880 + 881 + 88: ldil L%dcache_stride, %r1 882 + ldw R%dcache_stride(%r1), r31 883 + 884 + #ifdef CONFIG_64BIT 885 + depdi,z 1, 63-PAGE_SHIFT,1, %r25 886 + #else 887 + depwi,z 1, 31-PAGE_SHIFT,1, %r25 888 + #endif 889 + add %r28, %r25, %r25 890 + sub %r25, r31, %r25 891 + 892 + 1: pdc,m r31(%r28) 893 + pdc,m r31(%r28) 894 + pdc,m r31(%r28) 895 + pdc,m r31(%r28) 896 + pdc,m r31(%r28) 897 + pdc,m r31(%r28) 898 + pdc,m r31(%r28) 899 + pdc,m r31(%r28) 900 + pdc,m r31(%r28) 901 + pdc,m r31(%r28) 902 + pdc,m r31(%r28) 903 + pdc,m r31(%r28) 904 + pdc,m r31(%r28) 905 + pdc,m r31(%r28) 906 + pdc,m r31(%r28) 907 + cmpb,COND(>>) %r25, %r28, 1b /* predict taken */ 908 + pdc,m r31(%r28) 909 + 910 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 911 + sync 912 + bv %r0(%r2) 913 + nop 914 + ENDPROC_CFI(purge_dcache_page_asm) 864 915 865 916 ENTRY_CFI(flush_icache_page_asm) 866 917 ldil L%(TMPALIAS_MAP_START), %r28 ··· 945 874 946 875 #ifdef CONFIG_PA20 947 876 pdtlb,l %r0(%r28) 948 - pitlb,l %r0(%sr4,%r28) 877 + 1: pitlb,l %r0(%sr4,%r28) 878 + ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP) 949 879 #else 950 880 tlb_lock %r20,%r21,%r22 951 - pdtlb %r0(%r28) 952 - pitlb %r0(%sr4,%r28) 881 + 0: pdtlb %r0(%r28) 882 + 1: pitlb %r0(%sr4,%r28) 953 883 tlb_unlock %r20,%r21,%r22 884 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) 885 + 
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB) 886 + ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP) 954 887 #endif 955 888 956 - ldil L%icache_stride, %r1 889 + 88: ldil L%icache_stride, %r1 957 890 ldw R%icache_stride(%r1), %r31 958 891 959 892 #ifdef CONFIG_64BIT ··· 967 892 #endif 968 893 add %r28, %r25, %r25 969 894 sub %r25, %r31, %r25 970 - 971 895 972 896 /* fic only has the type 26 form on PA1.1, requiring an 973 897 * explicit space specification, so use %sr4 */ ··· 985 911 fic,m %r31(%sr4,%r28) 986 912 fic,m %r31(%sr4,%r28) 987 913 fic,m %r31(%sr4,%r28) 988 - cmpb,COND(<<) %r28, %r25,1b 914 + cmpb,COND(>>) %r25, %r28, 1b /* predict taken */ 989 915 fic,m %r31(%sr4,%r28) 990 916 917 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) 991 918 sync 992 919 bv %r0(%r2) 993 920 nop 994 921 ENDPROC_CFI(flush_icache_page_asm) 995 922 996 923 ENTRY_CFI(flush_kernel_dcache_page_asm) 997 - ldil L%dcache_stride, %r1 924 + 88: ldil L%dcache_stride, %r1 998 925 ldw R%dcache_stride(%r1), %r23 999 926 1000 927 #ifdef CONFIG_64BIT ··· 1005 930 #endif 1006 931 add %r26, %r25, %r25 1007 932 sub %r25, %r23, %r25 1008 - 1009 933 1010 934 1: fdc,m %r23(%r26) 1011 935 fdc,m %r23(%r26) ··· 1021 947 fdc,m %r23(%r26) 1022 948 fdc,m %r23(%r26) 1023 949 fdc,m %r23(%r26) 1024 - cmpb,COND(<<) %r26, %r25,1b 950 + cmpb,COND(>>) %r25, %r26, 1b /* predict taken */ 1025 951 fdc,m %r23(%r26) 1026 952 953 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 1027 954 sync 1028 955 bv %r0(%r2) 1029 956 nop 1030 957 ENDPROC_CFI(flush_kernel_dcache_page_asm) 1031 958 1032 959 ENTRY_CFI(purge_kernel_dcache_page_asm) 1033 - ldil L%dcache_stride, %r1 960 + 88: ldil L%dcache_stride, %r1 1034 961 ldw R%dcache_stride(%r1), %r23 1035 962 1036 963 #ifdef CONFIG_64BIT ··· 1057 982 pdc,m %r23(%r26) 1058 983 pdc,m %r23(%r26) 1059 984 pdc,m %r23(%r26) 1060 - cmpb,COND(<<) %r26, %r25, 1b 985 + cmpb,COND(>>) %r25, %r26, 1b /* predict taken */ 1061 986 pdc,m %r23(%r26) 1062 987 988 + 
89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 1063 989 sync 1064 990 bv %r0(%r2) 1065 991 nop 1066 992 ENDPROC_CFI(purge_kernel_dcache_page_asm) 1067 993 1068 994 ENTRY_CFI(flush_user_dcache_range_asm) 1069 - ldil L%dcache_stride, %r1 995 + 88: ldil L%dcache_stride, %r1 1070 996 ldw R%dcache_stride(%r1), %r23 1071 997 ldo -1(%r23), %r21 1072 998 ANDCM %r26, %r21, %r26 1073 999 1074 - 1: cmpb,COND(<<),n %r26, %r25, 1b 1000 + #ifdef CONFIG_64BIT 1001 + depd,z %r23, 59, 60, %r21 1002 + #else 1003 + depw,z %r23, 27, 28, %r21 1004 + #endif 1005 + add %r26, %r21, %r22 1006 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ 1007 + 1: add %r22, %r21, %r22 1008 + fdc,m %r23(%sr3, %r26) 1009 + fdc,m %r23(%sr3, %r26) 1010 + fdc,m %r23(%sr3, %r26) 1011 + fdc,m %r23(%sr3, %r26) 1012 + fdc,m %r23(%sr3, %r26) 1013 + fdc,m %r23(%sr3, %r26) 1014 + fdc,m %r23(%sr3, %r26) 1015 + fdc,m %r23(%sr3, %r26) 1016 + fdc,m %r23(%sr3, %r26) 1017 + fdc,m %r23(%sr3, %r26) 1018 + fdc,m %r23(%sr3, %r26) 1019 + fdc,m %r23(%sr3, %r26) 1020 + fdc,m %r23(%sr3, %r26) 1021 + fdc,m %r23(%sr3, %r26) 1022 + fdc,m %r23(%sr3, %r26) 1023 + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ 1075 1024 fdc,m %r23(%sr3, %r26) 1076 1025 1026 + 2: cmpb,COND(>>),n %r25, %r26, 2b 1027 + fdc,m %r23(%sr3, %r26) 1028 + 1029 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 1077 1030 sync 1078 1031 bv %r0(%r2) 1079 1032 nop 1080 1033 ENDPROC_CFI(flush_user_dcache_range_asm) 1081 1034 1082 1035 ENTRY_CFI(flush_kernel_dcache_range_asm) 1083 - ldil L%dcache_stride, %r1 1036 + 88: ldil L%dcache_stride, %r1 1084 1037 ldw R%dcache_stride(%r1), %r23 1085 1038 ldo -1(%r23), %r21 1086 1039 ANDCM %r26, %r21, %r26 1087 1040 1088 - 1: cmpb,COND(<<),n %r26, %r25,1b 1041 + #ifdef CONFIG_64BIT 1042 + depd,z %r23, 59, 60, %r21 1043 + #else 1044 + depw,z %r23, 27, 28, %r21 1045 + #endif 1046 + add %r26, %r21, %r22 1047 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ 1048 + 1: add %r22, %r21, %r22 1049 + 
fdc,m %r23(%r26) 1050 + fdc,m %r23(%r26) 1051 + fdc,m %r23(%r26) 1052 + fdc,m %r23(%r26) 1053 + fdc,m %r23(%r26) 1054 + fdc,m %r23(%r26) 1055 + fdc,m %r23(%r26) 1056 + fdc,m %r23(%r26) 1057 + fdc,m %r23(%r26) 1058 + fdc,m %r23(%r26) 1059 + fdc,m %r23(%r26) 1060 + fdc,m %r23(%r26) 1061 + fdc,m %r23(%r26) 1062 + fdc,m %r23(%r26) 1063 + fdc,m %r23(%r26) 1064 + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ 1065 + fdc,m %r23(%r26) 1066 + 1067 + 2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */ 1089 1068 fdc,m %r23(%r26) 1090 1069 1091 1070 sync 1071 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 1092 1072 syncdma 1093 1073 bv %r0(%r2) 1094 1074 nop 1095 1075 ENDPROC_CFI(flush_kernel_dcache_range_asm) 1096 1076 1097 1077 ENTRY_CFI(purge_kernel_dcache_range_asm) 1098 - ldil L%dcache_stride, %r1 1078 + 88: ldil L%dcache_stride, %r1 1099 1079 ldw R%dcache_stride(%r1), %r23 1100 1080 ldo -1(%r23), %r21 1101 1081 ANDCM %r26, %r21, %r26 1102 1082 1103 - 1: cmpb,COND(<<),n %r26, %r25,1b 1083 + #ifdef CONFIG_64BIT 1084 + depd,z %r23, 59, 60, %r21 1085 + #else 1086 + depw,z %r23, 27, 28, %r21 1087 + #endif 1088 + add %r26, %r21, %r22 1089 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ 1090 + 1: add %r22, %r21, %r22 1091 + pdc,m %r23(%r26) 1092 + pdc,m %r23(%r26) 1093 + pdc,m %r23(%r26) 1094 + pdc,m %r23(%r26) 1095 + pdc,m %r23(%r26) 1096 + pdc,m %r23(%r26) 1097 + pdc,m %r23(%r26) 1098 + pdc,m %r23(%r26) 1099 + pdc,m %r23(%r26) 1100 + pdc,m %r23(%r26) 1101 + pdc,m %r23(%r26) 1102 + pdc,m %r23(%r26) 1103 + pdc,m %r23(%r26) 1104 + pdc,m %r23(%r26) 1105 + pdc,m %r23(%r26) 1106 + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ 1107 + pdc,m %r23(%r26) 1108 + 1109 + 2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */ 1104 1110 pdc,m %r23(%r26) 1105 1111 1106 1112 sync 1113 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) 1107 1114 syncdma 1108 1115 bv %r0(%r2) 1109 1116 nop 1110 1117 ENDPROC_CFI(purge_kernel_dcache_range_asm) 1111 1118 1112 1119 
ENTRY_CFI(flush_user_icache_range_asm) 1113 - ldil L%icache_stride, %r1 1120 + 88: ldil L%icache_stride, %r1 1114 1121 ldw R%icache_stride(%r1), %r23 1115 1122 ldo -1(%r23), %r21 1116 1123 ANDCM %r26, %r21, %r26 1117 1124 1118 - 1: cmpb,COND(<<),n %r26, %r25,1b 1125 + #ifdef CONFIG_64BIT 1126 + depd,z %r23, 59, 60, %r21 1127 + #else 1128 + depw,z %r23, 27, 28, %r21 1129 + #endif 1130 + add %r26, %r21, %r22 1131 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ 1132 + 1: add %r22, %r21, %r22 1133 + fic,m %r23(%sr3, %r26) 1134 + fic,m %r23(%sr3, %r26) 1135 + fic,m %r23(%sr3, %r26) 1136 + fic,m %r23(%sr3, %r26) 1137 + fic,m %r23(%sr3, %r26) 1138 + fic,m %r23(%sr3, %r26) 1139 + fic,m %r23(%sr3, %r26) 1140 + fic,m %r23(%sr3, %r26) 1141 + fic,m %r23(%sr3, %r26) 1142 + fic,m %r23(%sr3, %r26) 1143 + fic,m %r23(%sr3, %r26) 1144 + fic,m %r23(%sr3, %r26) 1145 + fic,m %r23(%sr3, %r26) 1146 + fic,m %r23(%sr3, %r26) 1147 + fic,m %r23(%sr3, %r26) 1148 + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ 1119 1149 fic,m %r23(%sr3, %r26) 1120 1150 1151 + 2: cmpb,COND(>>),n %r25, %r26, 2b 1152 + fic,m %r23(%sr3, %r26) 1153 + 1154 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) 1121 1155 sync 1122 1156 bv %r0(%r2) 1123 1157 nop 1124 1158 ENDPROC_CFI(flush_user_icache_range_asm) 1125 1159 1126 1160 ENTRY_CFI(flush_kernel_icache_page) 1127 - ldil L%icache_stride, %r1 1161 + 88: ldil L%icache_stride, %r1 1128 1162 ldw R%icache_stride(%r1), %r23 1129 1163 1130 1164 #ifdef CONFIG_64BIT ··· 1260 1076 fic,m %r23(%sr4, %r26) 1261 1077 fic,m %r23(%sr4, %r26) 1262 1078 fic,m %r23(%sr4, %r26) 1263 - cmpb,COND(<<) %r26, %r25, 1b 1079 + cmpb,COND(>>) %r25, %r26, 1b /* predict taken */ 1264 1080 fic,m %r23(%sr4, %r26) 1265 1081 1082 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) 1266 1083 sync 1267 1084 bv %r0(%r2) 1268 1085 nop 1269 1086 ENDPROC_CFI(flush_kernel_icache_page) 1270 1087 1271 1088 ENTRY_CFI(flush_kernel_icache_range_asm) 1272 - ldil L%icache_stride, %r1 
1089 + 88: ldil L%icache_stride, %r1 1273 1090 ldw R%icache_stride(%r1), %r23 1274 1091 ldo -1(%r23), %r21 1275 1092 ANDCM %r26, %r21, %r26 1276 1093 1277 - 1: cmpb,COND(<<),n %r26, %r25, 1b 1094 + #ifdef CONFIG_64BIT 1095 + depd,z %r23, 59, 60, %r21 1096 + #else 1097 + depw,z %r23, 27, 28, %r21 1098 + #endif 1099 + add %r26, %r21, %r22 1100 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ 1101 + 1: add %r22, %r21, %r22 1102 + fic,m %r23(%sr4, %r26) 1103 + fic,m %r23(%sr4, %r26) 1104 + fic,m %r23(%sr4, %r26) 1105 + fic,m %r23(%sr4, %r26) 1106 + fic,m %r23(%sr4, %r26) 1107 + fic,m %r23(%sr4, %r26) 1108 + fic,m %r23(%sr4, %r26) 1109 + fic,m %r23(%sr4, %r26) 1110 + fic,m %r23(%sr4, %r26) 1111 + fic,m %r23(%sr4, %r26) 1112 + fic,m %r23(%sr4, %r26) 1113 + fic,m %r23(%sr4, %r26) 1114 + fic,m %r23(%sr4, %r26) 1115 + fic,m %r23(%sr4, %r26) 1116 + fic,m %r23(%sr4, %r26) 1117 + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ 1278 1118 fic,m %r23(%sr4, %r26) 1279 1119 1120 + 2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */ 1121 + fic,m %r23(%sr4, %r26) 1122 + 1123 + 89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) 1280 1124 sync 1281 1125 bv %r0(%r2) 1282 1126 nop
+81
arch/parisc/kernel/setup.c
··· 305 305 return 0; 306 306 } 307 307 308 + static int no_alternatives __initdata; 309 + static int __init setup_no_alternatives(char *str) 310 + { 311 + no_alternatives = 1; 312 + return 1; 313 + } 314 + __setup("no-alternatives", setup_no_alternatives); 315 + 316 + static void __init apply_alternatives_all(void) 317 + { 318 + struct alt_instr *entry; 319 + int index = 0, applied = 0; 320 + 321 + 322 + pr_info("alternatives: %spatching kernel code\n", 323 + no_alternatives ? "NOT " : ""); 324 + if (no_alternatives) 325 + return; 326 + 327 + set_kernel_text_rw(1); 328 + 329 + for (entry = (struct alt_instr *) &__alt_instructions; 330 + entry < (struct alt_instr *) &__alt_instructions_end; 331 + entry++, index++) { 332 + 333 + u32 *from, len, cond, replacement; 334 + 335 + from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset); 336 + len = entry->len; 337 + cond = entry->cond; 338 + replacement = entry->replacement; 339 + 340 + WARN_ON(!cond); 341 + pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", 342 + index, cond, len, from, replacement); 343 + 344 + if ((cond & ALT_COND_NO_SMP) && (num_online_cpus() != 1)) 345 + continue; 346 + if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0)) 347 + continue; 348 + if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0)) 349 + continue; 350 + 351 + /* 352 + * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit 353 + * set (bit #61, big endian), we have to flush and sync every 354 + * time IO-PDIR is changed in Ike/Astro. 355 + */ 356 + if ((cond & ALT_COND_NO_IOC_FDC) && 357 + (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)) 358 + continue; 359 + 360 + /* Want to replace pdtlb by a pdtlb,l instruction? */ 361 + if (replacement == INSN_PxTLB) { 362 + replacement = *from; 363 + if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */ 364 + replacement |= (1 << 10); /* set el bit */ 365 + } 366 + 367 + /* 368 + * Replace instruction with NOPs? 
369 + * For long distance insert a branch instruction instead. 370 + */ 371 + if (replacement == INSN_NOP && len > 1) 372 + replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */ 373 + 374 + pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", 375 + index, cond, len, from, replacement); 376 + 377 + /* Replace instruction */ 378 + *from = replacement; 379 + applied++; 380 + } 381 + 382 + pr_info("alternatives: applied %d out of %d patches\n", applied, index); 383 + 384 + set_kernel_text_rw(0); 385 + } 386 + 387 + 308 388 extern void gsc_init(void); 309 389 extern void processor_init(void); 310 390 extern void ccio_init(void); ··· 426 346 boot_cpu_data.cpu_hz / 1000000, 427 347 boot_cpu_data.cpu_hz % 1000000 ); 428 348 349 + apply_alternatives_all(); 429 350 parisc_setup_cache_timing(); 430 351 431 352 /* These are in a non-obvious order, will fix when we have an iotree */
-1
arch/parisc/kernel/signal.c
··· 65 65 #define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */ 66 66 #define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */ 67 67 #define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */ 68 - #define INSN_NOP 0x08000240 /* nop */ 69 68 /* For debugging */ 70 69 #define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */ 71 70
+4 -8
arch/parisc/kernel/syscall.S
··· 640 640 sub,<> %r28, %r25, %r0 641 641 2: stw %r24, 0(%r26) 642 642 /* Free lock */ 643 - sync 644 - stw %r20, 0(%sr2,%r20) 643 + stw,ma %r20, 0(%sr2,%r20) 645 644 #if ENABLE_LWS_DEBUG 646 645 /* Clear thread register indicator */ 647 646 stw %r0, 4(%sr2,%r20) ··· 654 655 3: 655 656 /* Error occurred on load or store */ 656 657 /* Free lock */ 657 - sync 658 - stw %r20, 0(%sr2,%r20) 658 + stw,ma %r20, 0(%sr2,%r20) 659 659 #if ENABLE_LWS_DEBUG 660 660 stw %r0, 4(%sr2,%r20) 661 661 #endif ··· 855 857 856 858 cas2_end: 857 859 /* Free lock */ 858 - sync 859 - stw %r20, 0(%sr2,%r20) 860 + stw,ma %r20, 0(%sr2,%r20) 860 861 /* Enable interrupts */ 861 862 ssm PSW_SM_I, %r0 862 863 /* Return to userspace, set no error */ ··· 865 868 22: 866 869 /* Error occurred on load or store */ 867 870 /* Free lock */ 868 - sync 869 - stw %r20, 0(%sr2,%r20) 871 + stw,ma %r20, 0(%sr2,%r20) 870 872 ssm PSW_SM_I, %r0 871 873 ldo 1(%r0),%r28 872 874 b lws_exit
+4 -3
arch/parisc/kernel/traps.c
··· 430 430 } 431 431 432 432 printk("\n"); 433 - pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n", 434 - msg, code, trap_name(code), regs, offset); 433 + pr_crit("%s: Code=%d (%s) at addr " RFMT "\n", 434 + msg, code, trap_name(code), offset); 435 435 show_regs(regs); 436 436 437 437 spin_unlock(&terminate_lock); ··· 802 802 * the Length/4 words starting at Address is zero. 803 803 */ 804 804 805 - /* Compute Checksum for HPMC handler */ 805 + /* Setup IVA and compute checksum for HPMC handler */ 806 + ivap[6] = (u32)__pa(os_hpmc); 806 807 length = os_hpmc_size; 807 808 ivap[7] = length; 808 809
+6
arch/parisc/kernel/vmlinux.lds.S
··· 61 61 EXIT_DATA 62 62 } 63 63 PERCPU_SECTION(8) 64 + . = ALIGN(4); 65 + .altinstructions : { 66 + __alt_instructions = .; 67 + *(.altinstructions) 68 + __alt_instructions_end = .; 69 + } 64 70 . = ALIGN(HUGEPAGE_SIZE); 65 71 __init_end = .; 66 72 /* freed after init ends here */
+17 -6
arch/parisc/mm/init.c
··· 494 494 pte = pte_mkhuge(pte); 495 495 } 496 496 497 - if (address >= end_paddr) { 498 - if (force) 499 - break; 500 - else 501 - pte_val(pte) = 0; 502 - } 497 + if (address >= end_paddr) 498 + break; 503 499 504 500 set_pte(pg_table, pte); 505 501 ··· 509 513 } 510 514 start_pmd = 0; 511 515 } 516 + } 517 + 518 + void __init set_kernel_text_rw(int enable_read_write) 519 + { 520 + unsigned long start = (unsigned long)_stext; 521 + unsigned long end = (unsigned long)_etext; 522 + 523 + map_pages(start, __pa(start), end-start, 524 + PAGE_KERNEL_RWX, enable_read_write ? 1:0); 525 + 526 + /* force the kernel to see the new TLB entries */ 527 + __flush_tlb_range(0, start, end); 528 + 529 + /* dump old cached instructions */ 530 + flush_icache_range(start, end); 512 531 } 513 532 514 533 void __ref free_initmem(void)
-3
drivers/parisc/Makefile
··· 8 8 obj-$(CONFIG_IOSAPIC) += iosapic.o 9 9 obj-$(CONFIG_IOMMU_SBA) += sba_iommu.o 10 10 obj-$(CONFIG_PCI_LBA) += lba_pci.o 11 - 12 - # Only use one of them: ccio-rm-dma is for PCX-W systems *only* 13 - # obj-$(CONFIG_IOMMU_CCIO) += ccio-rm-dma.o 14 11 obj-$(CONFIG_IOMMU_CCIO) += ccio-dma.o 15 12 16 13 obj-$(CONFIG_GSC) += gsc.o
+4 -8
drivers/parisc/ccio-dma.c
··· 609 609 ** PCX-T'? Don't know. (eg C110 or similar K-class) 610 610 ** 611 611 ** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit". 612 - ** Hopefully we can patch (NOP) these out at boot time somehow. 613 612 ** 614 613 ** "Since PCX-U employs an offset hash that is incompatible with 615 614 ** the real mode coherence index generation of U2, the PDIR entry 616 615 ** must be flushed to memory to retain coherence." 617 616 */ 618 - asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 619 - asm volatile("sync"); 617 + asm_io_fdc(pdir_ptr); 618 + asm_io_sync(); 620 619 } 621 620 622 621 /** ··· 681 682 ** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360) 682 683 ** PCX-U/U+ do. (eg C200/C240) 683 684 ** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit". 684 - ** 685 - ** Hopefully someone figures out how to patch (NOP) the 686 - ** FDC/SYNC out at boot time. 687 685 */ 688 - asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7])); 686 + asm_io_fdc(pdir_ptr); 689 687 690 688 iovp += IOVP_SIZE; 691 689 byte_cnt -= IOVP_SIZE; 692 690 } 693 691 694 - asm volatile("sync"); 692 + asm_io_sync(); 695 693 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt); 696 694 } 697 695
-202
drivers/parisc/ccio-rm-dma.c
··· 1 - /* 2 - * ccio-rm-dma.c: 3 - * DMA management routines for first generation cache-coherent machines. 4 - * "Real Mode" operation refers to U2/Uturn chip operation. The chip 5 - * can perform coherency checks w/o using the I/O MMU. That's all we 6 - * need until support for more than 4GB phys mem is needed. 7 - * 8 - * This is the trivial case - basically what x86 does. 9 - * 10 - * Drawbacks of using Real Mode are: 11 - * o outbound DMA is slower since one isn't using the prefetching 12 - * U2 can do for outbound DMA. 13 - * o Ability to do scatter/gather in HW is also lost. 14 - * o only known to work with PCX-W processor. (eg C360) 15 - * (PCX-U/U+ are not coherent with U2 in real mode.) 16 - * 17 - * 18 - * This program is free software; you can redistribute it and/or modify 19 - * it under the terms of the GNU General Public License as published by 20 - * the Free Software Foundation; either version 2 of the License, or 21 - * (at your option) any later version. 22 - * 23 - * 24 - * Original version/author: 25 - * CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc 26 - * cvs -z3 co linux/arch/parisc/kernel/dma-rm.c 27 - * 28 - * (C) Copyright 2000 Philipp Rumpf <prumpf@tux.org> 29 - * 30 - * 31 - * Adopted for The Puffin Group's parisc-linux port by Grant Grundler. 32 - * (C) Copyright 2000 Grant Grundler <grundler@puffin.external.hp.com> 33 - * 34 - */ 35 - 36 - #include <linux/types.h> 37 - #include <linux/init.h> 38 - #include <linux/mm.h> 39 - #include <linux/string.h> 40 - #include <linux/pci.h> 41 - #include <linux/gfp.h> 42 - 43 - #include <linux/uaccess.h> 44 - 45 - #include <asm/io.h> 46 - #include <asm/hardware.h> 47 - #include <asm/page.h> 48 - 49 - /* Only chose "ccio" since that's what HP-UX calls it.... 
50 - ** Make it easier for folks to migrate from one to the other :^) 51 - */ 52 - #define MODULE_NAME "ccio" 53 - 54 - #define U2_IOA_RUNWAY 0x580 55 - #define U2_BC_GSC 0x501 56 - #define UTURN_IOA_RUNWAY 0x581 57 - #define UTURN_BC_GSC 0x502 58 - 59 - #define IS_U2(id) ( \ 60 - (((id)->hw_type == HPHW_IOA) && ((id)->hversion == U2_IOA_RUNWAY)) || \ 61 - (((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == U2_BC_GSC)) \ 62 - ) 63 - 64 - #define IS_UTURN(id) ( \ 65 - (((id)->hw_type == HPHW_IOA) && ((id)->hversion == UTURN_IOA_RUNWAY)) || \ 66 - (((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == UTURN_BC_GSC)) \ 67 - ) 68 - 69 - static int ccio_dma_supported( struct pci_dev *dev, u64 mask) 70 - { 71 - if (dev == NULL) { 72 - printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n"); 73 - BUG(); 74 - return(0); 75 - } 76 - 77 - /* only support 32-bit devices (ie PCI/GSC) */ 78 - return((int) (mask >= 0xffffffffUL)); 79 - } 80 - 81 - 82 - static void *ccio_alloc_consistent(struct pci_dev *dev, size_t size, 83 - dma_addr_t *handle) 84 - { 85 - void *ret; 86 - 87 - ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size)); 88 - 89 - if (ret != NULL) { 90 - memset(ret, 0, size); 91 - *handle = virt_to_phys(ret); 92 - } 93 - return ret; 94 - } 95 - 96 - static void ccio_free_consistent(struct pci_dev *dev, size_t size, 97 - void *vaddr, dma_addr_t handle) 98 - { 99 - free_pages((unsigned long)vaddr, get_order(size)); 100 - } 101 - 102 - static dma_addr_t ccio_map_single(struct pci_dev *dev, void *ptr, size_t size, 103 - int direction) 104 - { 105 - return virt_to_phys(ptr); 106 - } 107 - 108 - static void ccio_unmap_single(struct pci_dev *dev, dma_addr_t dma_addr, 109 - size_t size, int direction) 110 - { 111 - /* Nothing to do */ 112 - } 113 - 114 - 115 - static int ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction) 116 - { 117 - int tmp = nents; 118 - 119 - /* KISS: map each buffer separately. 
*/ 120 - while (nents) { 121 - sg_dma_address(sglist) = ccio_map_single(dev, sglist->address, sglist->length, direction); 122 - sg_dma_len(sglist) = sglist->length; 123 - nents--; 124 - sglist++; 125 - } 126 - 127 - return tmp; 128 - } 129 - 130 - 131 - static void ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction) 132 - { 133 - #if 0 134 - while (nents) { 135 - ccio_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction); 136 - nents--; 137 - sglist++; 138 - } 139 - return; 140 - #else 141 - /* Do nothing (copied from current ccio_unmap_single() :^) */ 142 - #endif 143 - } 144 - 145 - 146 - static struct pci_dma_ops ccio_ops = { 147 - ccio_dma_supported, 148 - ccio_alloc_consistent, 149 - ccio_free_consistent, 150 - ccio_map_single, 151 - ccio_unmap_single, 152 - ccio_map_sg, 153 - ccio_unmap_sg, 154 - NULL, /* dma_sync_single_for_cpu : NOP for U2 */ 155 - NULL, /* dma_sync_single_for_device : NOP for U2 */ 156 - NULL, /* dma_sync_sg_for_cpu : ditto */ 157 - NULL, /* dma_sync_sg_for_device : ditto */ 158 - }; 159 - 160 - 161 - /* 162 - ** Determine if u2 should claim this chip (return 0) or not (return 1). 163 - ** If so, initialize the chip and tell other partners in crime they 164 - ** have work to do. 165 - */ 166 - static int __init 167 - ccio_probe(struct parisc_device *dev) 168 - { 169 - printk(KERN_INFO "%s found %s at 0x%lx\n", MODULE_NAME, 170 - dev->id.hversion == U2_BC_GSC ? "U2" : "UTurn", 171 - dev->hpa.start); 172 - 173 - /* 174 - ** FIXME - should check U2 registers to verify it's really running 175 - ** in "Real Mode". 
176 - */ 177 - 178 - #if 0 179 - /* will need this for "Virtual Mode" operation */ 180 - ccio_hw_init(ccio_dev); 181 - ccio_common_init(ccio_dev); 182 - #endif 183 - hppa_dma_ops = &ccio_ops; 184 - return 0; 185 - } 186 - 187 - static const struct parisc_device_id ccio_tbl[] __initconst = { 188 - { HPHW_BCPORT, HVERSION_REV_ANY_ID, U2_BC_GSC, 0xc }, 189 - { HPHW_BCPORT, HVERSION_REV_ANY_ID, UTURN_BC_GSC, 0xc }, 190 - { 0, } 191 - }; 192 - 193 - static struct parisc_driver ccio_driver __refdata = { 194 - .name = "U2/Uturn", 195 - .id_table = ccio_tbl, 196 - .probe = ccio_probe, 197 - }; 198 - 199 - void __init ccio_init(void) 200 - { 201 - register_parisc_driver(&ccio_driver); 202 - }
+2 -3
drivers/parisc/dino.c
··· 382 382 DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n", 383 383 __func__, irq, intr_dev, mask); 384 384 generic_handle_irq(irq); 385 - mask &= ~(1 << local_irq); 385 + mask &= ~DINO_MASK_IRQ(local_irq); 386 386 } while (mask); 387 387 388 388 /* Support for level triggered IRQ lines. ··· 396 396 if (mask) { 397 397 if (--ilr_loop > 0) 398 398 goto ilr_again; 399 - printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n", 399 + pr_warn_ratelimited("Dino 0x%px: stuck interrupt %d\n", 400 400 dino_dev->hba.base_addr, mask); 401 - return IRQ_NONE; 402 401 } 403 402 return IRQ_HANDLED; 404 403 }
+6 -11
drivers/parisc/sba_iommu.c
··· 587 587 * (bit #61, big endian), we have to flush and sync every time 588 588 * IO-PDIR is changed in Ike/Astro. 589 589 */ 590 - if (ioc_needs_fdc) 591 - asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 590 + asm_io_fdc(pdir_ptr); 592 591 } 593 592 594 593 ··· 640 641 do { 641 642 /* clear I/O Pdir entry "valid" bit first */ 642 643 ((u8 *) pdir_ptr)[7] = 0; 644 + asm_io_fdc(pdir_ptr); 643 645 if (ioc_needs_fdc) { 644 - asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 645 646 #if 0 646 647 entries_per_cacheline = L1_CACHE_SHIFT - 3; 647 648 #endif ··· 660 661 ** could dump core on HPMC. 661 662 */ 662 663 ((u8 *) pdir_ptr)[7] = 0; 663 - if (ioc_needs_fdc) 664 - asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 664 + asm_io_fdc(pdir_ptr); 665 665 666 666 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM); 667 667 } ··· 771 773 } 772 774 773 775 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */ 774 - if (ioc_needs_fdc) 775 - asm volatile("sync" : : ); 776 + asm_io_sync(); 776 777 777 778 #ifdef ASSERT_PDIR_SANITY 778 779 sba_check_pdir(ioc,"Check after sba_map_single()"); ··· 855 858 sba_free_range(ioc, iova, size); 856 859 857 860 /* If fdc's were issued, force fdc's to be visible now */ 858 - if (ioc_needs_fdc) 859 - asm volatile("sync" : : ); 861 + asm_io_sync(); 860 862 861 863 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ 862 864 #endif /* DELAYED_RESOURCE_CNT == 0 */ ··· 1004 1008 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry); 1005 1009 1006 1010 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */ 1007 - if (ioc_needs_fdc) 1008 - asm volatile("sync" : : ); 1011 + asm_io_sync(); 1009 1012 1010 1013 #ifdef ASSERT_PDIR_SANITY 1011 1014 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+3 -3
scripts/extract-vmlinux
··· 48 48 tmp=$(mktemp /tmp/vmlinux-XXX) 49 49 trap "rm -f $tmp" 0 50 50 51 - # Initial attempt for uncompressed images or objects: 52 - check_vmlinux $img 53 - 54 51 # That didn't work, so retry after decompression. 55 52 try_decompress '\037\213\010' xy gunzip 56 53 try_decompress '\3757zXZ\000' abcde unxz ··· 56 59 try_decompress '\211\114\132' xy 'lzop -d' 57 60 try_decompress '\002!L\030' xxx 'lz4 -d' 58 61 try_decompress '(\265/\375' xxx unzstd 62 + 63 + # Finally check for uncompressed images or objects: 64 + check_vmlinux $img 59 65 60 66 # Bail out: 61 67 echo "$me: Cannot find vmlinux." >&2