Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'powerpc-4.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
- opal-prd mmap fix from Vaidy
- set kernel taint for MCEs from Daniel
- alignment exception description from Anton
- ppc4xx_hsta_msi build fix from Daniel
- opal-elog interrupt fix from Alistair
- core_idle_state race fix from Shreyas
- hv-24x7 lockdep fix from Sukadev
- multiple cxl fixes from Daniel, Ian, Mikey & Maninder
- update MAINTAINERS to point at shared tree

* tag 'powerpc-4.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
cxl: Check if afu is not null in cxl_slbia
powerpc: Update MAINTAINERS to point at shared tree
powerpc/perf/24x7: Fix lockdep warning
cxl: Fix off by one error allowing subsequent mmap page to be accessed
cxl: Fail mmap if requested mapping is larger than assigned problem state area
cxl: Fix refcounting in kernel API
powerpc/powernv: Fix race in updating core_idle_state
powerpc/powernv: Fix opal-elog interrupt handler
powerpc/ppc4xx_hsta_msi: Include ppc-pci.h to fix reference to hose_list
powerpc: Add plain English description for alignment exception oopses
cxl: Test the correct mmio space before unmapping
powerpc: Set the correct kernel taint on machine check errors
cxl/vphb.c: Use phb pointer after NULL check
powerpc/powernv: Fix vma page prot flags in opal-prd driver

+60 -40
+1 -1
MAINTAINERS
···
 W:	http://www.penguinppc.org/
 L:	linuxppc-dev@lists.ozlabs.org
 Q:	http://patchwork.ozlabs.org/project/linuxppc-dev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
 S:	Supported
 F:	Documentation/powerpc/
 F:	arch/powerpc/
+21 -10
arch/powerpc/kernel/idle_power7.S
···
 	.text
 
 /*
+ * Used by threads when the lock bit of core_idle_state is set.
+ * Threads will spin in HMT_LOW until the lock bit is cleared.
+ * r14 - pointer to core_idle_state
+ * r15 - used to load contents of core_idle_state
+ */
+
+core_idle_lock_held:
+	HMT_LOW
+3:	lwz	r15,0(r14)
+	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
+	bne	3b
+	HMT_MEDIUM
+	lwarx	r15,0,r14
+	blr
+
+/*
  * Pass requested state in r3:
  *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE
  *
···
 	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
 lwarx_loop1:
 	lwarx	r15,0,r14
+
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	bnel	core_idle_lock_held
+
 	andc	r15,r15,r7			/* Clear thread bit */
 
 	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
···
 	 * workaround undo code or resyncing timebase or restoring context
 	 * In either case loop until the lock bit is cleared.
 	 */
-	bne	core_idle_lock_held
+	bnel	core_idle_lock_held
 
 	cmpwi	cr2,r15,0
 	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
···
 	bne-	lwarx_loop2
 	isync
 	b	common_exit
-
-core_idle_lock_held:
-	HMT_LOW
-core_idle_lock_loop:
-	lwz	r15,0(14)
-	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
-	bne	core_idle_lock_loop
-	HMT_MEDIUM
-	b	lwarx_loop2
 
 first_thread_in_subcore:
 	/* First thread in subcore to wakeup */
+2
arch/powerpc/kernel/traps.c
···
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
 	return handled;
+4
arch/powerpc/mm/fault.c
···
 		printk(KERN_ALERT "Unable to handle kernel paging request for "
 			"instruction fetch\n");
 		break;
+	case 0x600:
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"unaligned access at address 0x%08lx\n", regs->dar);
+		break;
 	default:
 		printk(KERN_ALERT "Unable to handle kernel paging request for "
 			"unknown fault\n");
+2
arch/powerpc/perf/hv-24x7.c
···
 	if (!attr)
 		return NULL;
 
+	sysfs_attr_init(&attr->attr.attr);
+
 	attr->var = str;
 	attr->attr.attr.name = name;
 	attr->attr.attr.mode = 0444;
+5 -11
arch/powerpc/platforms/powernv/opal-elog.c
···
 	return elog;
 }
 
-static void elog_work_fn(struct work_struct *work)
+static irqreturn_t elog_event(int irq, void *data)
 {
 	__be64 size;
 	__be64 id;
···
 	rc = opal_get_elog_size(&id, &size, &type);
 	if (rc != OPAL_SUCCESS) {
 		pr_err("ELOG: OPAL log info read failed\n");
-		return;
+		return IRQ_HANDLED;
 	}
 
 	elog_size = be64_to_cpu(size);
···
 	 * entries.
 	 */
 	if (kset_find_obj(elog_kset, name))
-		return;
+		return IRQ_HANDLED;
 
 	create_elog_obj(log_id, elog_size, elog_type);
-}
 
-static DECLARE_WORK(elog_work, elog_work_fn);
-
-static irqreturn_t elog_event(int irq, void *data)
-{
-	schedule_work(&elog_work);
 	return IRQ_HANDLED;
 }
···
 		return irq;
 	}
 
-	rc = request_irq(irq, elog_event,
-			IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL);
+	rc = request_threaded_irq(irq, NULL, elog_event,
+			IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
 	if (rc) {
 		pr_err("%s: Can't request OPAL event irq (%d)\n",
 		       __func__, rc);
+4 -5
arch/powerpc/platforms/powernv/opal-prd.c
···
 static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	size_t addr, size;
+	pgprot_t page_prot;
 	int rc;
 
 	pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
···
 	if (!opal_prd_range_is_valid(addr, size))
 		return -EINVAL;
 
-	vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file,
-						vma->vm_pgoff,
-						size, vma->vm_page_prot))
-					| _PAGE_SPECIAL);
+	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+					 size, vma->vm_page_prot);
 
 	rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
-			vma->vm_page_prot);
+				page_prot);
 
 	return rc;
 }
+1
arch/powerpc/sysdev/ppc4xx_hsta_msi.c
···
 #include <linux/pci.h>
 #include <linux/semaphore.h>
 #include <asm/msi_bitmap.h>
+#include <asm/ppc-pci.h>
 
 struct ppc4xx_hsta_msi {
 	struct device *dev;
+5 -7
drivers/misc/cxl/api.c
···
 
 	afu = cxl_pci_to_afu(dev);
 
+	get_device(&afu->dev);
 	ctx = cxl_context_alloc();
 	if (IS_ERR(ctx))
 		return ctx;
···
 	rc = cxl_context_init(ctx, afu, false, NULL);
 	if (rc) {
 		kfree(ctx);
+		put_device(&afu->dev);
 		return ERR_PTR(-ENOMEM);
 	}
 	cxl_assign_psn_space(ctx);
···
 {
 	if (ctx->status != CLOSED)
 		return -EBUSY;
+
+	put_device(&ctx->afu->dev);
 
 	cxl_context_free(ctx);
 
···
 	}
 
 	ctx->status = STARTED;
-	get_device(&ctx->afu->dev);
 out:
 	mutex_unlock(&ctx->status_mutex);
 	return rc;
···
 /* Stop a context.  Returns 0 on success, otherwise -Errno */
 int cxl_stop_context(struct cxl_context *ctx)
 {
-	int rc;
-
-	rc = __detach_context(ctx);
-	if (!rc)
-		put_device(&ctx->afu->dev);
-	return rc;
+	return __detach_context(ctx);
 }
 EXPORT_SYMBOL_GPL(cxl_stop_context);
+11 -3
drivers/misc/cxl/context.c
···
 
 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
 		area = ctx->afu->psn_phys;
-		if (offset > ctx->afu->adapter->ps_size)
+		if (offset >= ctx->afu->adapter->ps_size)
 			return VM_FAULT_SIGBUS;
 	} else {
 		area = ctx->psn_phys;
-		if (offset > ctx->psn_size)
+		if (offset >= ctx->psn_size)
 			return VM_FAULT_SIGBUS;
 	}
 
···
  */
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 {
+	u64 start = vma->vm_pgoff << PAGE_SHIFT;
 	u64 len = vma->vm_end - vma->vm_start;
-	len = min(len, ctx->psn_size);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		if (start + len > ctx->afu->adapter->ps_size)
+			return -EINVAL;
+	} else {
+		if (start + len > ctx->psn_size)
+			return -EINVAL;
+	}
 
 	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
 		/* make sure there is a valid per process space for this AFU */
+1 -1
drivers/misc/cxl/main.c
···
 	spin_lock(&adapter->afu_list_lock);
 	for (slice = 0; slice < adapter->slices; slice++) {
 		afu = adapter->afu[slice];
-		if (!afu->enabled)
+		if (!afu || !afu->enabled)
 			continue;
 		rcu_read_lock();
 		idr_for_each_entry(&afu->contexts_idr, ctx, id)
+1 -1
drivers/misc/cxl/pci.c
···
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-	if (afu->p1n_mmio)
+	if (afu->p2n_mmio)
 		iounmap(afu->p2n_mmio);
 	if (afu->p1n_mmio)
 		iounmap(afu->p1n_mmio);
+2 -1
drivers/misc/cxl/vphb.c
···
 	unsigned long addr;
 
 	phb = pci_bus_to_host(bus);
-	afu = (struct cxl_afu *)phb->private_data;
 	if (phb == NULL)
 		return PCIBIOS_DEVICE_NOT_FOUND;
+	afu = (struct cxl_afu *)phb->private_data;
+
 	if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	if (offset >= (unsigned long)phb->cfg_data)