Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'parisc-4.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
"Main changes are:

- Added support to the parisc dma functions to return DMA_ERROR_CODE
if DMA isn't possible. This fixes a long-standing kernel crash if
parport_pc is enabled (by Thomas Bogendoerfer, marked for stable
series).

- Use the compat_sys_keyctl() in compat mode (by Eric Biggers, marked
for stable series).

- Initial support for the Page Deallocation Table (PDT) which is
maintained by firmware and holds the list of memory addresses which
had physical errors. By checking that list we can prevent Linux
from using those broken memory areas.

- Ensure IRQs are off in switch_mm().

- Report SIGSEGV instead of SIGBUS when running out of stack.

- Mark the cr16 clocksource stable on single-socket and single-core
machines"

* 'parisc-4.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: DMA API: return error instead of BUG_ON for dma ops on non dma devs
parisc: Report SIGSEGV instead of SIGBUS when running out of stack
parisc: use compat_sys_keyctl()
parisc: Don't hardcode PSW values in hpmc code
parisc: Don't hardcode PSW values in gsc_*() functions
parisc: Avoid zeroing gr[0] in fixup_exception()
parisc/mm: Ensure IRQs are off in switch_mm()
parisc: Add Page Deallocation Table (PDT) support
parisc: Enhance detection of synchronous cr16 clocksources
parisc: Drop per_cpu uaccess related exception_data struct
parisc: Inline trivial exception code in lusercopy.S

+420 -83
+7 -4
arch/parisc/include/asm/dma-mapping.h
··· 20 20 ** flush/purge and allocate "regular" cacheable pages for everything. 21 21 */ 22 22 23 + #define DMA_ERROR_CODE (~(dma_addr_t)0) 24 + 23 25 #ifdef CONFIG_PA11 24 26 extern const struct dma_map_ops pcxl_dma_ops; 25 27 extern const struct dma_map_ops pcx_dma_ops; ··· 56 54 break; 57 55 } 58 56 } 59 - BUG_ON(!dev->platform_data); 60 57 return dev->platform_data; 61 58 } 62 - 63 - #define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu) 64 - 59 + 60 + #define GET_IOC(dev) ({ \ 61 + void *__pdata = parisc_walk_tree(dev); \ 62 + __pdata ? HBA_DATA(__pdata)->iommu : NULL; \ 63 + }) 65 64 66 65 #ifdef CONFIG_IOMMU_CCIO 67 66 struct parisc_device;
+8 -8
arch/parisc/include/asm/io.h
··· 34 34 unsigned char ret; 35 35 36 36 __asm__ __volatile__( 37 - " rsm 2,%0\n" 37 + " rsm %3,%0\n" 38 38 " ldbx 0(%2),%1\n" 39 39 " mtsm %0\n" 40 - : "=&r" (flags), "=r" (ret) : "r" (addr) ); 40 + : "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) ); 41 41 42 42 return ret; 43 43 } ··· 48 48 unsigned short ret; 49 49 50 50 __asm__ __volatile__( 51 - " rsm 2,%0\n" 51 + " rsm %3,%0\n" 52 52 " ldhx 0(%2),%1\n" 53 53 " mtsm %0\n" 54 - : "=&r" (flags), "=r" (ret) : "r" (addr) ); 54 + : "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) ); 55 55 56 56 return ret; 57 57 } ··· 87 87 { 88 88 long flags; 89 89 __asm__ __volatile__( 90 - " rsm 2,%0\n" 90 + " rsm %3,%0\n" 91 91 " stbs %1,0(%2)\n" 92 92 " mtsm %0\n" 93 - : "=&r" (flags) : "r" (val), "r" (addr) ); 93 + : "=&r" (flags) : "r" (val), "r" (addr), "i" (PSW_SM_D) ); 94 94 } 95 95 96 96 static inline void gsc_writew(unsigned short val, unsigned long addr) 97 97 { 98 98 long flags; 99 99 __asm__ __volatile__( 100 - " rsm 2,%0\n" 100 + " rsm %3,%0\n" 101 101 " sths %1,0(%2)\n" 102 102 " mtsm %0\n" 103 - : "=&r" (flags) : "r" (val), "r" (addr) ); 103 + : "=&r" (flags) : "r" (val), "r" (addr), "i" (PSW_SM_D) ); 104 104 } 105 105 106 106 static inline void gsc_writel(unsigned int val, unsigned long addr)
+13 -2
arch/parisc/include/asm/mmu_context.h
··· 49 49 mtctl(__space_to_prot(context), 8); 50 50 } 51 51 52 - static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) 52 + static inline void switch_mm_irqs_off(struct mm_struct *prev, 53 + struct mm_struct *next, struct task_struct *tsk) 53 54 { 54 - 55 55 if (prev != next) { 56 56 mtctl(__pa(next->pgd), 25); 57 57 load_context(next->context); 58 58 } 59 59 } 60 + 61 + static inline void switch_mm(struct mm_struct *prev, 62 + struct mm_struct *next, struct task_struct *tsk) 63 + { 64 + unsigned long flags; 65 + 66 + local_irq_save(flags); 67 + switch_mm_irqs_off(prev, next, tsk); 68 + local_irq_restore(flags); 69 + } 70 + #define switch_mm_irqs_off switch_mm_irqs_off 60 71 61 72 #define deactivate_mm(tsk,mm) do { } while (0) 62 73
+18
arch/parisc/include/asm/pdc.h
··· 6 6 #if !defined(__ASSEMBLY__) 7 7 8 8 extern int pdc_type; 9 + extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */ 10 + extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT) */ 9 11 10 12 /* Values for pdc_type */ 11 13 #define PDC_TYPE_ILLEGAL -1 ··· 144 142 }; 145 143 146 144 #endif /* !CONFIG_PA20 */ 145 + 146 + struct pdc_mem_retinfo { /* PDC_MEM/PDC_MEM_MEMINFO (return info) */ 147 + unsigned long pdt_size; 148 + unsigned long pdt_entries; 149 + unsigned long pdt_status; 150 + unsigned long first_dbe_loc; 151 + unsigned long good_mem; 152 + }; 153 + 154 + struct pdc_mem_read_pdt { /* PDC_MEM/PDC_MEM_READ_PDT (return info) */ 155 + unsigned long pdt_entries; 156 + }; 147 157 148 158 #ifdef CONFIG_64BIT 149 159 struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */ ··· 315 301 int pdc_tod_read(struct pdc_tod *tod); 316 302 int pdc_tod_set(unsigned long sec, unsigned long usec); 317 303 304 + void pdc_pdt_init(void); /* in pdt.c */ 305 + int pdc_mem_pdt_info(struct pdc_mem_retinfo *rinfo); 306 + int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *rpdt_read, 307 + unsigned long *pdt_entries_ptr); 318 308 #ifdef CONFIG_64BIT 319 309 int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr, 320 310 struct pdc_memory_table *tbl, unsigned long entries);
+26 -9
arch/parisc/include/asm/pdcpat.h
··· 147 147 #define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */ 148 148 #define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */ 149 149 #define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */ 150 - #define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */ 151 - #define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */ 152 - #define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */ 150 + #define PDC_PAT_MEM_SETGM 9L /* Set Good Memory value */ 151 + #define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */ 152 + #define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */ 153 153 /* Memory Address */ 154 154 #define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */ 155 155 #define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */ ··· 211 211 unsigned long cpu_num; 212 212 unsigned long cpu_loc; 213 213 }; 214 + 215 + struct pdc_pat_mem_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_INFO (return info) */ 216 + unsigned int ke; /* bit 0: memory inside good memory? 
*/ 217 + unsigned int current_pdt_entries:16; 218 + unsigned int max_pdt_entries:16; 219 + unsigned long Cs_bitmap; 220 + unsigned long Ic_bitmap; 221 + unsigned long good_mem; 222 + unsigned long first_dbe_loc; /* first location of double bit error */ 223 + unsigned long clear_time; /* last PDT clear time (since Jan 1970) */ 224 + }; 225 + 226 + struct pdc_pat_mem_read_pd_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_READ */ 227 + unsigned long actual_count_bytes; 228 + unsigned long pdt_entries; 229 + }; 230 + 214 231 215 232 struct pdc_pat_pd_addr_map_entry { 216 233 unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */ ··· 310 293 311 294 extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset); 312 295 313 - 314 296 extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val); 315 297 extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val); 316 298 317 - 318 - /* Flag to indicate this is a PAT box...don't use this unless you 319 - ** really have to...it might go away some day. 320 - */ 321 - extern int pdc_pat; /* arch/parisc/kernel/inventory.c */ 299 + extern int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo); 300 + extern int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, 301 + unsigned long *pdt_entries_ptr, unsigned long max_entries); 302 + extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, 303 + unsigned long *pdt_entries_ptr, unsigned long count, 304 + unsigned long offset); 322 305 323 306 #endif /* __ASSEMBLY__ */ 324 307
+3
arch/parisc/include/asm/pgtable.h
··· 511 511 512 512 #define pte_same(A,B) (pte_val(A) == pte_val(B)) 513 513 514 + struct seq_file; 515 + extern void arch_report_meminfo(struct seq_file *m); 516 + 514 517 #endif /* !__ASSEMBLY__ */ 515 518 516 519
+2
arch/parisc/include/asm/processor.h
··· 103 103 unsigned long bh_count; /* number of times bh was invoked */ 104 104 unsigned long fp_rev; 105 105 unsigned long fp_model; 106 + unsigned long cpu_num; /* CPU number from PAT firmware */ 107 + unsigned long cpu_loc; /* CPU location from PAT firmware */ 106 108 unsigned int state; 107 109 struct parisc_device *dev; 108 110 unsigned long loops_per_jiffy;
-11
arch/parisc/include/asm/uaccess.h
··· 69 69 ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1) 70 70 71 71 /* 72 - * The page fault handler stores, in a per-cpu area, the following information 73 - * if a fixup routine is available. 74 - */ 75 - struct exception_data { 76 - unsigned long fault_ip; 77 - unsigned long fault_gp; 78 - unsigned long fault_space; 79 - unsigned long fault_addr; 80 - }; 81 - 82 - /* 83 72 * load_sr2() preloads the space register %%sr2 - based on the value of 84 73 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which 85 74 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
+6 -6
arch/parisc/include/uapi/asm/pdc.h
··· 131 131 #define PDC_TLB_SETUP 1 /* set up miss handling */ 132 132 133 133 #define PDC_MEM 20 /* Manage memory */ 134 - #define PDC_MEM_MEMINFO 0 135 - #define PDC_MEM_ADD_PAGE 1 136 - #define PDC_MEM_CLEAR_PDT 2 137 - #define PDC_MEM_READ_PDT 3 138 - #define PDC_MEM_RESET_CLEAR 4 139 - #define PDC_MEM_GOODMEM 5 134 + #define PDC_MEM_MEMINFO 0 /* Return PDT info */ 135 + #define PDC_MEM_ADD_PAGE 1 /* Add page to PDT */ 136 + #define PDC_MEM_CLEAR_PDT 2 /* Clear PDT */ 137 + #define PDC_MEM_READ_PDT 3 /* Read PDT entry */ 138 + #define PDC_MEM_RESET_CLEAR 4 /* Reset PDT clear flag */ 139 + #define PDC_MEM_GOODMEM 5 /* Set good_mem value */ 140 140 #define PDC_MEM_TABLE 128 /* Non contig mem map (sprockets) */ 141 141 #define PDC_MEM_RETURN_ADDRESS_TABLE PDC_MEM_TABLE 142 142 #define PDC_MEM_GET_MEMORY_SYSTEM_TABLES_SIZE 131
+1 -1
arch/parisc/kernel/Makefile
··· 4 4 5 5 extra-y := head.o vmlinux.lds 6 6 7 - obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ 7 + obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \ 8 8 pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ 9 9 ptrace.o hardware.o inventory.o drivers.o \ 10 10 signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
-5
arch/parisc/kernel/asm-offsets.c
··· 298 298 DEFINE(HUGEPAGE_SIZE, PAGE_SIZE); 299 299 #endif 300 300 BLANK(); 301 - DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); 302 - DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp)); 303 - DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); 304 - DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); 305 - BLANK(); 306 301 DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long)); 307 302 BLANK(); 308 303 return 0;
+108
arch/parisc/kernel/firmware.c
··· 957 957 } 958 958 EXPORT_SYMBOL(pdc_tod_read); 959 959 960 + int pdc_mem_pdt_info(struct pdc_mem_retinfo *rinfo) 961 + { 962 + int retval; 963 + unsigned long flags; 964 + 965 + spin_lock_irqsave(&pdc_lock, flags); 966 + retval = mem_pdc_call(PDC_MEM, PDC_MEM_MEMINFO, __pa(pdc_result), 0); 967 + convert_to_wide(pdc_result); 968 + memcpy(rinfo, pdc_result, sizeof(*rinfo)); 969 + spin_unlock_irqrestore(&pdc_lock, flags); 970 + 971 + return retval; 972 + } 973 + 974 + int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *pret, 975 + unsigned long *pdt_entries_ptr) 976 + { 977 + int retval; 978 + unsigned long flags; 979 + 980 + spin_lock_irqsave(&pdc_lock, flags); 981 + retval = mem_pdc_call(PDC_MEM, PDC_MEM_READ_PDT, __pa(pdc_result), 982 + __pa(pdc_result2)); 983 + if (retval == PDC_OK) { 984 + convert_to_wide(pdc_result); 985 + memcpy(pret, pdc_result, sizeof(*pret)); 986 + convert_to_wide(pdc_result2); 987 + memcpy(pdt_entries_ptr, pdc_result2, 988 + pret->pdt_entries * sizeof(*pdt_entries_ptr)); 989 + } 990 + spin_unlock_irqrestore(&pdc_lock, flags); 991 + 992 + return retval; 993 + } 994 + 960 995 /** 961 996 * pdc_tod_set - Set the Time-Of-Day clock. 962 997 * @sec: The number of seconds since epoch. 
··· 1414 1379 spin_lock_irqsave(&pdc_lock, flags); 1415 1380 retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_WRITE, 1416 1381 pci_addr, pci_size, val); 1382 + spin_unlock_irqrestore(&pdc_lock, flags); 1383 + 1384 + return retval; 1385 + } 1386 + 1387 + /** 1388 + * pdc_pat_mem_pdc_info - Retrieve information about page deallocation table 1389 + * @rinfo: memory pdt information 1390 + * 1391 + */ 1392 + int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo) 1393 + { 1394 + int retval; 1395 + unsigned long flags; 1396 + 1397 + spin_lock_irqsave(&pdc_lock, flags); 1398 + retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_INFO, 1399 + __pa(&pdc_result)); 1400 + if (retval == PDC_OK) 1401 + memcpy(rinfo, &pdc_result, sizeof(*rinfo)); 1402 + spin_unlock_irqrestore(&pdc_lock, flags); 1403 + 1404 + return retval; 1405 + } 1406 + 1407 + /** 1408 + * pdc_pat_mem_read_cell_pdt - Read PDT entries from (old) PAT firmware 1409 + * @pret: array of PDT entries 1410 + * @pdt_entries_ptr: ptr to hold number of PDT entries 1411 + * @max_entries: maximum number of entries to be read 1412 + * 1413 + */ 1414 + int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, 1415 + unsigned long *pdt_entries_ptr, unsigned long max_entries) 1416 + { 1417 + int retval; 1418 + unsigned long flags, entries; 1419 + 1420 + spin_lock_irqsave(&pdc_lock, flags); 1421 + /* PDC_PAT_MEM_CELL_READ is available on early PAT machines only */ 1422 + retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_READ, 1423 + __pa(&pdc_result), parisc_cell_num, __pa(&pdc_result2)); 1424 + 1425 + if (retval == PDC_OK) { 1426 + /* build up return value as for PDC_PAT_MEM_PD_READ */ 1427 + entries = min(pdc_result[0], max_entries); 1428 + pret->pdt_entries = entries; 1429 + pret->actual_count_bytes = entries * sizeof(unsigned long); 1430 + memcpy(pdt_entries_ptr, &pdc_result2, pret->actual_count_bytes); 1431 + } 1432 + 1433 + spin_unlock_irqrestore(&pdc_lock, flags); 1434 + WARN_ON(retval == PDC_OK 
&& pdc_result[0] > max_entries); 1435 + 1436 + return retval; 1437 + } 1438 + /** 1439 + * pdc_pat_mem_read_pd_pdt - Read PDT entries from (newer) PAT firmware 1440 + * @pret: array of PDT entries 1441 + * @pdt_entries_ptr: ptr to hold number of PDT entries 1442 + * 1443 + */ 1444 + int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, 1445 + unsigned long *pdt_entries_ptr, unsigned long count, 1446 + unsigned long offset) 1447 + { 1448 + int retval; 1449 + unsigned long flags; 1450 + 1451 + spin_lock_irqsave(&pdc_lock, flags); 1452 + retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ, 1453 + __pa(&pret), __pa(pdt_entries_ptr), 1454 + count, offset); 1417 1455 spin_unlock_irqrestore(&pdc_lock, flags); 1418 1456 1419 1457 return retval;
+3 -2
arch/parisc/kernel/hpmc.S
··· 44 44 45 45 #include <asm/assembly.h> 46 46 #include <asm/pdc.h> 47 + #include <asm/psw.h> 47 48 48 49 #include <linux/linkage.h> 49 50 #include <linux/init.h> ··· 136 135 * So turn on the Q bit and turn off the M bit. 137 136 */ 138 137 139 - ldo 8(%r0),%r4 /* PSW Q on, PSW M off */ 138 + ldi PSW_SM_Q,%r4 /* PSW Q on, PSW M off */ 140 139 mtctl %r4,ipsw 141 140 mtctl %r0,pcsq 142 141 mtctl %r0,pcsq ··· 258 257 259 258 tovirt_r1 %r30 /* make sp virtual */ 260 259 261 - rsm 8,%r0 /* Clear Q bit */ 260 + rsm PSW_SM_Q,%r0 /* Clear Q bit */ 262 261 ldi 1,%r8 /* Set trap code to "1" for HPMC */ 263 262 load32 PA(intr_save),%r1 264 263 be 0(%sr7,%r1)
+9
arch/parisc/kernel/inventory.c
··· 40 40 41 41 int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; 42 42 43 + /* cell number and location (PAT firmware only) */ 44 + unsigned long parisc_cell_num __read_mostly; 45 + unsigned long parisc_cell_loc __read_mostly; 46 + 47 + 43 48 void __init setup_pdc(void) 44 49 { 45 50 long status; ··· 83 78 if (status == PDC_OK) { 84 79 pdc_type = PDC_TYPE_PAT; 85 80 pr_cont("64 bit PAT.\n"); 81 + parisc_cell_num = cell_info.cell_num; 82 + parisc_cell_loc = cell_info.cell_loc; 83 + pr_info("PAT: Running on cell %lu and location %lu.\n", 84 + parisc_cell_num, parisc_cell_loc); 86 85 return; 87 86 } 88 87 #endif
+143
arch/parisc/kernel/pdt.c
··· 1 + /* 2 + * Page Deallocation Table (PDT) support 3 + * 4 + * The Page Deallocation Table (PDT) holds a table with pointers to bad 5 + * memory (broken RAM modules) which is maintained by firmware. 6 + * 7 + * Copyright 2017 by Helge Deller <deller@gmx.de> 8 + * 9 + * TODO: 10 + * - check regularily for new bad memory 11 + * - add userspace interface with procfs or sysfs 12 + * - increase number of PDT entries dynamically 13 + */ 14 + 15 + #include <linux/memblock.h> 16 + #include <linux/seq_file.h> 17 + 18 + #include <asm/pdc.h> 19 + #include <asm/pdcpat.h> 20 + #include <asm/sections.h> 21 + #include <asm/pgtable.h> 22 + 23 + enum pdt_access_type { 24 + PDT_NONE, 25 + PDT_PDC, 26 + PDT_PAT_NEW, 27 + PDT_PAT_OLD 28 + }; 29 + 30 + static enum pdt_access_type pdt_type; 31 + 32 + /* global PDT status information */ 33 + static struct pdc_mem_retinfo pdt_status; 34 + 35 + #define MAX_PDT_TABLE_SIZE PAGE_SIZE 36 + #define MAX_PDT_ENTRIES (MAX_PDT_TABLE_SIZE / sizeof(unsigned long)) 37 + static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss; 38 + 39 + 40 + /* report PDT entries via /proc/meminfo */ 41 + void arch_report_meminfo(struct seq_file *m) 42 + { 43 + if (pdt_type == PDT_NONE) 44 + return; 45 + 46 + seq_printf(m, "PDT_max_entries: %7lu\n", 47 + pdt_status.pdt_size); 48 + seq_printf(m, "PDT_cur_entries: %7lu\n", 49 + pdt_status.pdt_entries); 50 + } 51 + 52 + /* 53 + * pdc_pdt_init() 54 + * 55 + * Initialize kernel PDT structures, read initial PDT table from firmware, 56 + * report all current PDT entries and mark bad memory with memblock_reserve() 57 + * to avoid that the kernel will use broken memory areas. 
58 + * 59 + */ 60 + void __init pdc_pdt_init(void) 61 + { 62 + int ret, i; 63 + unsigned long entries; 64 + struct pdc_mem_read_pdt pdt_read_ret; 65 + 66 + if (is_pdc_pat()) { 67 + struct pdc_pat_mem_retinfo pat_rinfo; 68 + 69 + pdt_type = PDT_PAT_NEW; 70 + ret = pdc_pat_mem_pdt_info(&pat_rinfo); 71 + pdt_status.pdt_size = pat_rinfo.max_pdt_entries; 72 + pdt_status.pdt_entries = pat_rinfo.current_pdt_entries; 73 + pdt_status.pdt_status = 0; 74 + pdt_status.first_dbe_loc = pat_rinfo.first_dbe_loc; 75 + pdt_status.good_mem = pat_rinfo.good_mem; 76 + } else { 77 + pdt_type = PDT_PDC; 78 + ret = pdc_mem_pdt_info(&pdt_status); 79 + } 80 + 81 + if (ret != PDC_OK) { 82 + pdt_type = PDT_NONE; 83 + pr_info("PDT: Firmware does not provide any page deallocation" 84 + " information.\n"); 85 + return; 86 + } 87 + 88 + entries = pdt_status.pdt_entries; 89 + WARN_ON(entries > MAX_PDT_ENTRIES); 90 + 91 + pr_info("PDT: size %lu, entries %lu, status %lu, dbe_loc 0x%lx," 92 + " good_mem %lu\n", 93 + pdt_status.pdt_size, pdt_status.pdt_entries, 94 + pdt_status.pdt_status, pdt_status.first_dbe_loc, 95 + pdt_status.good_mem); 96 + 97 + if (entries == 0) { 98 + pr_info("PDT: Firmware reports all memory OK.\n"); 99 + return; 100 + } 101 + 102 + if (pdt_status.first_dbe_loc && 103 + pdt_status.first_dbe_loc <= __pa((unsigned long)&_end)) 104 + pr_crit("CRITICAL: Bad memory inside kernel image memory area!\n"); 105 + 106 + pr_warn("PDT: Firmware reports %lu entries of faulty memory:\n", 107 + entries); 108 + 109 + if (pdt_type == PDT_PDC) 110 + ret = pdc_mem_pdt_read_entries(&pdt_read_ret, pdt_entry); 111 + else { 112 + #ifdef CONFIG_64BIT 113 + struct pdc_pat_mem_read_pd_retinfo pat_pret; 114 + 115 + ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry, 116 + MAX_PDT_ENTRIES); 117 + if (ret != PDC_OK) { 118 + pdt_type = PDT_PAT_OLD; 119 + ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry, 120 + MAX_PDT_TABLE_SIZE, 0); 121 + } 122 + #else 123 + ret = PDC_BAD_PROC; 124 + #endif 125 + } 126 
+ 127 + if (ret != PDC_OK) { 128 + pdt_type = PDT_NONE; 129 + pr_debug("PDT type %d, retval = %d\n", pdt_type, ret); 130 + return; 131 + } 132 + 133 + for (i = 0; i < pdt_status.pdt_entries; i++) { 134 + if (i < 20) 135 + pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n", 136 + i, 137 + pdt_entry[i] & PAGE_MASK, 138 + pdt_entry[i] & 1); 139 + 140 + /* mark memory page bad */ 141 + memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); 142 + } 143 + }
+4 -1
arch/parisc/kernel/processor.c
··· 94 94 unsigned long txn_addr; 95 95 unsigned long cpuid; 96 96 struct cpuinfo_parisc *p; 97 - struct pdc_pat_cpu_num cpu_info __maybe_unused; 97 + struct pdc_pat_cpu_num cpu_info = { }; 98 98 99 99 #ifdef CONFIG_SMP 100 100 if (num_online_cpus() >= nr_cpu_ids) { ··· 113 113 */ 114 114 cpuid = boot_cpu_data.cpu_count; 115 115 txn_addr = dev->hpa.start; /* for legacy PDC */ 116 + cpu_info.cpu_num = cpu_info.cpu_loc = cpuid; 116 117 117 118 #ifdef CONFIG_64BIT 118 119 if (is_pdc_pat()) { ··· 181 180 p->hpa = dev->hpa.start; /* save CPU hpa */ 182 181 p->cpuid = cpuid; /* save CPU id */ 183 182 p->txn_addr = txn_addr; /* save CPU IRQ address */ 183 + p->cpu_num = cpu_info.cpu_num; 184 + p->cpu_loc = cpu_info.cpu_loc; 184 185 #ifdef CONFIG_SMP 185 186 /* 186 187 ** FIXME: review if any other initialization is clobbered
+1 -1
arch/parisc/kernel/syscall_table.S
··· 361 361 ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ 362 362 ENTRY_SAME(add_key) 363 363 ENTRY_SAME(request_key) /* 265 */ 364 - ENTRY_SAME(keyctl) 364 + ENTRY_COMP(keyctl) 365 365 ENTRY_SAME(ioprio_set) 366 366 ENTRY_SAME(ioprio_get) 367 367 ENTRY_SAME(inotify_init)
+20 -4
arch/parisc/kernel/time.c
··· 243 243 static int __init init_cr16_clocksource(void) 244 244 { 245 245 /* 246 - * The cr16 interval timers are not syncronized across CPUs, so mark 247 - * them unstable and lower rating on SMP systems. 246 + * The cr16 interval timers are not syncronized across CPUs on 247 + * different sockets, so mark them unstable and lower rating on 248 + * multi-socket SMP systems. 248 249 */ 249 250 if (num_online_cpus() > 1) { 250 - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; 251 - clocksource_cr16.rating = 0; 251 + int cpu; 252 + unsigned long cpu0_loc; 253 + cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; 254 + 255 + for_each_online_cpu(cpu) { 256 + if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc) 257 + continue; 258 + 259 + clocksource_cr16.name = "cr16_unstable"; 260 + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; 261 + clocksource_cr16.rating = 0; 262 + break; 263 + } 252 264 } 265 + 266 + /* XXX: We may want to mark sched_clock stable here if cr16 clocks are 267 + * in sync: 268 + * (clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */ 253 269 254 270 /* register at clocksource framework */ 255 271 clocksource_register_hz(&clocksource_cr16,
+9 -16
arch/parisc/lib/lusercopy.S
··· 56 56 mtsp %r1,%sr1 57 57 .endm 58 58 59 - .macro fixup_branch lbl 60 - ldil L%\lbl, %r1 61 - ldo R%\lbl(%r1), %r1 62 - bv %r0(%r1) 63 - .endm 64 - 65 59 /* 66 60 * unsigned long lclear_user(void *to, unsigned long n) 67 61 * ··· 76 82 $lclu_done: 77 83 bv %r0(%r2) 78 84 copy %r25,%r28 85 + 86 + 2: b $lclu_done 87 + ldo 1(%r25),%r25 88 + 89 + ASM_EXCEPTIONTABLE_ENTRY(1b,2b) 90 + 79 91 .exit 80 92 ENDPROC_CFI(lclear_user) 81 93 82 - .section .fixup,"ax" 83 - 2: fixup_branch $lclu_done 84 - ldo 1(%r25),%r25 85 - .previous 86 - 87 - ASM_EXCEPTIONTABLE_ENTRY(1b,2b) 88 94 89 95 .procend 90 96 ··· 116 122 $lslen_nzero: 117 123 b $lslen_done 118 124 ldo 1(%r26),%r26 /* special case for N == 0 */ 119 - ENDPROC_CFI(lstrnlen_user) 120 125 121 - .section .fixup,"ax" 122 - 3: fixup_branch $lslen_done 126 + 3: b $lslen_done 123 127 copy %r24,%r26 /* reset r26 so 0 is returned on fault */ 124 - .previous 125 128 126 129 ASM_EXCEPTIONTABLE_ENTRY(1b,3b) 127 130 ASM_EXCEPTIONTABLE_ENTRY(2b,3b) 131 + 132 + ENDPROC_CFI(lstrnlen_user) 128 133 129 134 .procend 130 135
+2 -10
arch/parisc/mm/fault.c
··· 29 29 #define BITSSET 0x1c0 /* for identifying LDCW */ 30 30 31 31 32 - DEFINE_PER_CPU(struct exception_data, exception_data); 33 - 34 32 int show_unhandled_signals = 1; 35 33 36 34 /* ··· 141 143 142 144 fix = search_exception_tables(regs->iaoq[0]); 143 145 if (fix) { 144 - struct exception_data *d; 145 - d = this_cpu_ptr(&exception_data); 146 - d->fault_ip = regs->iaoq[0]; 147 - d->fault_gp = regs->gr[27]; 148 - d->fault_space = regs->isr; 149 - d->fault_addr = regs->ior; 150 - 151 146 /* 152 147 * Fix up get_user() and put_user(). 153 148 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant ··· 154 163 /* zero target register for get_user() */ 155 164 if (parisc_acctyp(0, regs->iir) == VM_READ) { 156 165 int treg = regs->iir & 0x1f; 166 + BUG_ON(treg == 0); 157 167 regs->gr[treg] = 0; 158 168 } 159 169 } ··· 359 367 case 15: /* Data TLB miss fault/Data page fault */ 360 368 /* send SIGSEGV when outside of vma */ 361 369 if (!vma || 362 - address < vma->vm_start || address > vma->vm_end) { 370 + address < vma->vm_start || address >= vma->vm_end) { 363 371 si.si_signo = SIGSEGV; 364 372 si.si_code = SEGV_MAPERR; 365 373 break;
+3
arch/parisc/mm/init.c
··· 381 381 request_resource(res, &data_resource); 382 382 } 383 383 request_resource(&sysram_resources[0], &pdcdata_resource); 384 + 385 + /* Initialize Page Deallocation Table (PDT) and check for bad memory. */ 386 + pdc_pdt_init(); 384 387 } 385 388 386 389 static int __init parisc_text_address(unsigned long vaddr)
+12
drivers/parisc/ccio-dma.c
··· 741 741 742 742 BUG_ON(!dev); 743 743 ioc = GET_IOC(dev); 744 + if (!ioc) 745 + return DMA_ERROR_CODE; 744 746 745 747 BUG_ON(size <= 0); 746 748 ··· 816 814 817 815 BUG_ON(!dev); 818 816 ioc = GET_IOC(dev); 817 + if (!ioc) { 818 + WARN_ON(!ioc); 819 + return; 820 + } 819 821 820 822 DBG_RUN("%s() iovp 0x%lx/%x\n", 821 823 __func__, (long)iova, size); ··· 924 918 925 919 BUG_ON(!dev); 926 920 ioc = GET_IOC(dev); 921 + if (!ioc) 922 + return 0; 927 923 928 924 DBG_RUN_SG("%s() START %d entries\n", __func__, nents); 929 925 ··· 998 990 999 991 BUG_ON(!dev); 1000 992 ioc = GET_IOC(dev); 993 + if (!ioc) { 994 + WARN_ON(!ioc); 995 + return; 996 + } 1001 997 1002 998 DBG_RUN_SG("%s() START %d entries, %p,%x\n", 1003 999 __func__, nents, sg_virt(sglist), sglist->length);
+4 -1
drivers/parisc/dino.c
··· 154 154 }; 155 155 156 156 /* Looks nice and keeps the compiler happy */ 157 - #define DINO_DEV(d) ((struct dino_device *) d) 157 + #define DINO_DEV(d) ({ \ 158 + void *__pdata = d; \ 159 + BUG_ON(!__pdata); \ 160 + (struct dino_device *)__pdata; }) 158 161 159 162 160 163 /*
+4 -2
drivers/parisc/lba_pci.c
··· 111 111 112 112 113 113 /* Looks nice and keeps the compiler happy */ 114 - #define LBA_DEV(d) ((struct lba_device *) (d)) 115 - 114 + #define LBA_DEV(d) ({ \ 115 + void *__pdata = d; \ 116 + BUG_ON(!__pdata); \ 117 + (struct lba_device *)__pdata; }) 116 118 117 119 /* 118 120 ** Only allow 8 subsidiary busses per LBA
+14
drivers/parisc/sba_iommu.c
··· 691 691 return 0; 692 692 693 693 ioc = GET_IOC(dev); 694 + if (!ioc) 695 + return 0; 694 696 695 697 /* 696 698 * check if mask is >= than the current max IO Virt Address ··· 724 722 int pide; 725 723 726 724 ioc = GET_IOC(dev); 725 + if (!ioc) 726 + return DMA_ERROR_CODE; 727 727 728 728 /* save offset bits */ 729 729 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; ··· 817 813 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); 818 814 819 815 ioc = GET_IOC(dev); 816 + if (!ioc) { 817 + WARN_ON(!ioc); 818 + return; 819 + } 820 820 offset = iova & ~IOVP_MASK; 821 821 iova ^= offset; /* clear offset bits */ 822 822 size += offset; ··· 960 952 DBG_RUN_SG("%s() START %d entries\n", __func__, nents); 961 953 962 954 ioc = GET_IOC(dev); 955 + if (!ioc) 956 + return 0; 963 957 964 958 /* Fast path single entry scatterlists. */ 965 959 if (nents == 1) { ··· 1047 1037 __func__, nents, sg_virt(sglist), sglist->length); 1048 1038 1049 1039 ioc = GET_IOC(dev); 1040 + if (!ioc) { 1041 + WARN_ON(!ioc); 1042 + return; 1043 + } 1050 1044 1051 1045 #ifdef SBA_COLLECT_STATS 1052 1046 ioc->usg_calls++;