Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
[POWERPC] Make alignment exception always check exception table
[POWERPC] Disallow kprobes on emulate_step and branch_taken
[POWERPC] Make mmiowb's io_sync preempt safe
[POWERPC] Make high hugepage areas preempt safe
[POWERPC] Make current preempt-safe
[POWERPC] qe_lib: qe_issue_cmd writes wrong value to CECDR
[POWERPC] Use 4kB iommu pages even on 64kB-page systems
[POWERPC] Fix oprofile support for e500 in arch/powerpc
[POWERPC] Fix rmb() for e500-based machines
[POWERPC] Fix various offb issues

+382 -403
-1
arch/powerpc/kernel/Makefile
···
 obj-$(CONFIG_TAU)			+= tau_6xx.o
 obj32-$(CONFIG_SOFTWARE_SUSPEND)	+= swsusp_32.o
 obj32-$(CONFIG_MODULES)		+= module_32.o
-obj-$(CONFIG_E500)			+= perfmon_fsl_booke.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
 
+1 -1
arch/powerpc/kernel/btext.c
···
 	prop = get_property(np, "linux,bootx-linebytes", NULL);
 	if (prop == NULL)
 		prop = get_property(np, "linebytes", NULL);
-	if (prop)
+	if (prop && *prop != 0xffffffffu)
 		pitch = *prop;
 	if (pitch == 1)
 		pitch = 0x1000;
+45 -32
arch/powerpc/kernel/iommu.c
···
 static int novmerge = 1;
 #endif
 
+static inline unsigned long iommu_num_pages(unsigned long vaddr,
+					    unsigned long slen)
+{
+	unsigned long npages;
+
+	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
+	npages >>= IOMMU_PAGE_SHIFT;
+
+	return npages;
+}
+
 static int __init setup_iommu(char *str)
 {
 	if (!strcmp(str, "novmerge"))
···
 	}
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << PAGE_SHIFT;	/* Set the return dma address */
+	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
+	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
 			 direction);
 
···
 	unsigned long entry, free_entry;
 	unsigned long i;
 
-	entry = dma_addr >> PAGE_SHIFT;
+	entry = dma_addr >> IOMMU_PAGE_SHIFT;
 	free_entry = entry - tbl->it_offset;
 
 	if (((free_entry + npages) > tbl->it_size) ||
···
 	/* Init first segment length for backout at failure */
 	outs->dma_length = 0;
 
-	DBG("mapping %d elements:\n", nelems);
+	DBG("sg mapping %d elements:\n", nelems);
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
···
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long)page_address(s->page) + s->offset;
-		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
-		npages >>= PAGE_SHIFT;
-		entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
+		npages = iommu_num_pages(vaddr, slen);
+		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
···
 
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
-		dma_addr = entry << PAGE_SHIFT;
-		dma_addr |= s->offset;
+		dma_addr = entry << IOMMU_PAGE_SHIFT;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
 
-		DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
+		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
+		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
···
 				DBG("    can't merge, new segment.\n");
 			} else {
 				outs->dma_length += s->length;
-				DBG("    merged, new len: %lx\n", outs->dma_length);
+				DBG("    merged, new len: %ux\n", outs->dma_length);
 			}
 		}
 
···
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
-			vaddr = s->dma_address & PAGE_MASK;
-			npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
-				>> PAGE_SHIFT;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK;
+			npages = iommu_num_pages(s->dma_address, s->dma_length);
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
···
 
 		if (sglist->dma_length == 0)
 			break;
-		npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
-			  - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
+		npages = iommu_num_pages(dma_handle, sglist->dma_length);
 		__iommu_free(tbl, dma_handle, npages);
 		sglist++;
 	}
···
 	BUG_ON(direction == DMA_NONE);
 
 	uaddr = (unsigned long)vaddr;
-	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
-	npages >>= PAGE_SHIFT;
+	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, 0);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
···
 						tbl, vaddr, npages);
 			}
 		} else
-			dma_handle |= (uaddr & ~PAGE_MASK);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
 	}
 
 	return dma_handle;
···
 void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction)
 {
+	unsigned int npages;
+
 	BUG_ON(direction == DMA_NONE);
 
-	if (tbl)
-		iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
-					(dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+	if (tbl) {
+		npages = iommu_num_pages(dma_handle, size);
+		iommu_free(tbl, dma_handle, npages);
+	}
 }
 
 /* Allocates a contiguous real buffer and creates mappings over it.
···
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
-	unsigned int npages, order;
+	unsigned int order;
+	unsigned int nio_pages, io_order;
 	struct page *page;
 
 	size = PAGE_ALIGN(size);
-	npages = size >> PAGE_SHIFT;
 	order = get_order(size);
 
 	/*
···
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
-			      mask >> PAGE_SHIFT, order);
+	nio_pages = size >> IOMMU_PAGE_SHIFT;
+	io_order = get_iommu_order(size);
+	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+			      mask >> IOMMU_PAGE_SHIFT, io_order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
···
 void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
-	unsigned int npages;
-
 	if (tbl) {
+		unsigned int nio_pages;
+
+		size = PAGE_ALIGN(size);
+		nio_pages = size >> IOMMU_PAGE_SHIFT;
+		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
-		npages = size >> PAGE_SHIFT;
-		iommu_free(tbl, dma_handle, npages);
 		free_pages((unsigned long)vaddr, get_order(size));
 	}
 }
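The iommu.c change above decouples the I/O page size from the kernel page size: TCEs are always allocated in 4kB units, even when the kernel runs with 64kB pages. A minimal userspace sketch of the page-count arithmetic (the IOMMU_PAGE_* macros mirror the kernel ones but are redefined locally for illustration):

	/* Sketch of the fixed-size IOMMU page math; assumes 4kB IOMMU
	 * pages as in the kernel header, compiled as plain C. */
	#include <stdio.h>

	#define IOMMU_PAGE_SHIFT 12
	#define IOMMU_PAGE_SIZE  (1UL << IOMMU_PAGE_SHIFT)
	#define IOMMU_PAGE_MASK  (~((1UL << IOMMU_PAGE_SHIFT) - 1))
	#define IOMMU_PAGE_ALIGN(x) (((x) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

	static unsigned long iommu_num_pages(unsigned long vaddr, unsigned long slen)
	{
		/* Same computation as the kernel helper: round the end of the
		 * buffer up and its start down to IOMMU page boundaries. */
		return (IOMMU_PAGE_ALIGN(vaddr + slen) -
			(vaddr & IOMMU_PAGE_MASK)) >> IOMMU_PAGE_SHIFT;
	}

	int main(void)
	{
		/* A 6000-byte buffer starting 100 bytes into a page spans two
		 * 4kB IOMMU pages, even though it fits in one 64kB CPU page. */
		printf("%lu\n", iommu_num_pages(0x10000064UL, 6000));	/* -> 2 */
		return 0;
	}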
-221
arch/powerpc/kernel/perfmon_fsl_booke.c
···
-/* arch/powerpc/kernel/perfmon_fsl_booke.c
- * Freescale Book-E Performance Monitor code
- *
- * Author: Andy Fleming
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/prctl.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/xmon.h>
-#include <asm/pmc.h>
-
-static inline u32 get_pmlca(int ctr);
-static inline void set_pmlca(int ctr, u32 pmlca);
-
-static inline u32 get_pmlca(int ctr)
-{
-	u32 pmlca;
-
-	switch (ctr) {
-	case 0:
-		pmlca = mfpmr(PMRN_PMLCA0);
-		break;
-	case 1:
-		pmlca = mfpmr(PMRN_PMLCA1);
-		break;
-	case 2:
-		pmlca = mfpmr(PMRN_PMLCA2);
-		break;
-	case 3:
-		pmlca = mfpmr(PMRN_PMLCA3);
-		break;
-	default:
-		panic("Bad ctr number\n");
-	}
-
-	return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
-	switch (ctr) {
-	case 0:
-		mtpmr(PMRN_PMLCA0, pmlca);
-		break;
-	case 1:
-		mtpmr(PMRN_PMLCA1, pmlca);
-		break;
-	case 2:
-		mtpmr(PMRN_PMLCA2, pmlca);
-		break;
-	case 3:
-		mtpmr(PMRN_PMLCA3, pmlca);
-		break;
-	default:
-		panic("Bad ctr number\n");
-	}
-}
-
-void init_pmc_stop(int ctr)
-{
-	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
-			PMLCA_FCM1 | PMLCA_FCM0);
-	u32 pmlcb = 0;
-
-	switch (ctr) {
-	case 0:
-		mtpmr(PMRN_PMLCA0, pmlca);
-		mtpmr(PMRN_PMLCB0, pmlcb);
-		break;
-	case 1:
-		mtpmr(PMRN_PMLCA1, pmlca);
-		mtpmr(PMRN_PMLCB1, pmlcb);
-		break;
-	case 2:
-		mtpmr(PMRN_PMLCA2, pmlca);
-		mtpmr(PMRN_PMLCB2, pmlcb);
-		break;
-	case 3:
-		mtpmr(PMRN_PMLCA3, pmlca);
-		mtpmr(PMRN_PMLCB3, pmlcb);
-		break;
-	default:
-		panic("Bad ctr number!\n");
-	}
-}
-
-void set_pmc_event(int ctr, int event)
-{
-	u32 pmlca;
-
-	pmlca = get_pmlca(ctr);
-
-	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
-		((event << PMLCA_EVENT_SHIFT) &
-		 PMLCA_EVENT_MASK);
-
-	set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_user_kernel(int ctr, int user, int kernel)
-{
-	u32 pmlca;
-
-	pmlca = get_pmlca(ctr);
-
-	if(user)
-		pmlca &= ~PMLCA_FCU;
-	else
-		pmlca |= PMLCA_FCU;
-
-	if(kernel)
-		pmlca &= ~PMLCA_FCS;
-	else
-		pmlca |= PMLCA_FCS;
-
-	set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_marked(int ctr, int mark0, int mark1)
-{
-	u32 pmlca = get_pmlca(ctr);
-
-	if(mark0)
-		pmlca &= ~PMLCA_FCM0;
-	else
-		pmlca |= PMLCA_FCM0;
-
-	if(mark1)
-		pmlca &= ~PMLCA_FCM1;
-	else
-		pmlca |= PMLCA_FCM1;
-
-	set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctr(int ctr, int enable)
-{
-	u32 pmlca = get_pmlca(ctr);
-
-	pmlca &= ~PMLCA_FC;
-
-	if (enable)
-		pmlca |= PMLCA_CE;
-	else
-		pmlca &= ~PMLCA_CE;
-
-	set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctrs(int enable)
-{
-	u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
-	pmgc0 &= ~PMGC0_FAC;
-	pmgc0 |= PMGC0_FCECE;
-
-	if (enable)
-		pmgc0 |= PMGC0_PMIE;
-	else
-		pmgc0 &= ~PMGC0_PMIE;
-
-	mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void pmc_stop_ctrs(void)
-{
-	u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
-	pmgc0 |= PMGC0_FAC;
-
-	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
-
-	mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void dump_pmcs(void)
-{
-	printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
-	printk("pmc\t\tpmlca\t\tpmlcb\n");
-	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
-			mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
-	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
-			mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
-	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
-			mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
-	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
-			mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
-}
-
-EXPORT_SYMBOL(init_pmc_stop);
-EXPORT_SYMBOL(set_pmc_event);
-EXPORT_SYMBOL(set_pmc_user_kernel);
-EXPORT_SYMBOL(set_pmc_marked);
-EXPORT_SYMBOL(pmc_start_ctr);
-EXPORT_SYMBOL(pmc_start_ctrs);
-EXPORT_SYMBOL(pmc_stop_ctrs);
-EXPORT_SYMBOL(dump_pmcs);
+1 -1
arch/powerpc/kernel/pmc.c
···
 	}
 
 	pmc_owner_caller = __builtin_return_address(0);
-	perf_irq = new_perf_irq ? : dummy_perf;
+	perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
 
  out:
 	spin_unlock(&pmc_owner_lock);
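The pmc.c hunk only spells out GCC's shorthand conditional: `a ? : b` is a GNU C extension meaning `a ? a : b`. For a plain pointer with no side effects the two are equivalent, so this is purely a readability/portability change:

	perf_irq = new_perf_irq ? : dummy_perf;              /* GNU C extension */
	perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; /* standard C      */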
+10 -8
arch/powerpc/kernel/traps.c
···
 
 void alignment_exception(struct pt_regs *regs)
 {
-	int fixed = 0;
+	int sig, code, fixed = 0;
 
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
···
 
 	/* Operand address was bad */
 	if (fixed == -EFAULT) {
-		if (user_mode(regs))
-			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
-		else
-			/* Search exception table */
-			bad_page_fault(regs, regs->dar, SIGSEGV);
-		return;
+		sig = SIGSEGV;
+		code = SEGV_ACCERR;
+	} else {
+		sig = SIGBUS;
+		code = BUS_ADRALN;
 	}
-	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
+	if (user_mode(regs))
+		_exception(sig, regs, code, regs->dar);
+	else
+		bad_page_fault(regs, regs->dar, sig);
 }
 
 void StackOverflow(struct pt_regs *regs)
+2 -2
arch/powerpc/kernel/vio.c
···
 			&tbl->it_index, &offset, &size);
 
 	/* TCE table size - measured in tce entries */
-	tbl->it_size = size >> PAGE_SHIFT;
+	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
 	/* offset for VIO should always be 0 */
-	tbl->it_offset = offset >> PAGE_SHIFT;
+	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
 	tbl->it_busno = 0;
 	tbl->it_type = TCE_VB;
 
+3 -2
arch/powerpc/lib/sstep.c
···
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
+#include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <asm/sstep.h>
 #include <asm/processor.h>
···
 /*
  * Determine whether a conditional branch instruction would branch.
  */
-static int branch_taken(unsigned int instr, struct pt_regs *regs)
+static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
 {
 	unsigned int bo = (instr >> 21) & 0x1f;
 	unsigned int bi;
···
  * or -1 if the instruction is one that should not be stepped,
  * such as an rfid, or a mtmsrd that would clear MSR_RI.
  */
-int emulate_step(struct pt_regs *regs, unsigned int instr)
+int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 {
 	unsigned int opcode, rd;
 	unsigned long int imm;
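Marking branch_taken() and emulate_step() with __kprobes keeps them out of the probeable text range, so single-stepping a probed instruction can never recurse into another probe inside the emulation path itself. Roughly how this works (a sketch of the kprobes core of that era, not a verbatim copy):

	/* __kprobes places a function in a dedicated text section ... */
	#define __kprobes __attribute__((__section__(".kprobes.text")))

	/* ... and probe registration refuses addresses inside it,
	 * along the lines of: */
	extern char __kprobes_text_start[], __kprobes_text_end[];

	static int in_kprobes_functions(unsigned long addr)
	{
		if (addr >= (unsigned long)__kprobes_text_start &&
		    addr < (unsigned long)__kprobes_text_end)
			return -EINVAL;		/* register_kprobe() fails */
		return 0;
	}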
-3
arch/powerpc/mm/hugetlbpage.c
···
 
 	mm->context.high_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
+1 -1
arch/powerpc/oprofile/Makefile
···
 oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
 oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
 oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
-oprofile-$(CONFIG_PPC32) += op_model_7450.o
+oprofile-$(CONFIG_6xx) += op_model_7450.o
+8 -2
arch/powerpc/oprofile/common.c
···
 	model->handle_interrupt(regs, ctr);
 }
 
+static void op_powerpc_cpu_setup(void *dummy)
+{
+	model->cpu_setup(ctr);
+}
+
 static int op_powerpc_setup(void)
 {
 	int err;
···
 	model->reg_setup(ctr, &sys, model->num_counters);
 
 	/* Configure the registers on all cpus.	 */
-	on_each_cpu(model->cpu_setup, NULL, 0, 1);
+	on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1);
 
 	return 0;
 }
···
 		case PPC_OPROFILE_POWER4:
 			model = &op_model_power4;
 			break;
-#else
+#endif
+#ifdef CONFIG_6xx
 		case PPC_OPROFILE_G4:
 			model = &op_model_7450;
 			break;
+1 -1
arch/powerpc/oprofile/op_model_7450.c
···
 
 /* Configures the counters on this CPU based on the global
  * settings */
-static void fsl7450_cpu_setup(void *unused)
+static void fsl7450_cpu_setup(struct op_counter_config *ctr)
 {
 	/* freeze all counters */
 	pmc_stop_ctrs();
+138 -36
arch/powerpc/oprofile/op_model_fsl_booke.c
···
 static int num_counters;
 static int oprofile_running;
 
-static inline unsigned int ctr_read(unsigned int i)
+static void init_pmc_stop(int ctr)
 {
-	switch(i) {
+	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
+			PMLCA_FCM1 | PMLCA_FCM0);
+	u32 pmlcb = 0;
+
+	switch (ctr) {
 	case 0:
-		return mfpmr(PMRN_PMC0);
+		mtpmr(PMRN_PMLCA0, pmlca);
+		mtpmr(PMRN_PMLCB0, pmlcb);
+		break;
 	case 1:
-		return mfpmr(PMRN_PMC1);
+		mtpmr(PMRN_PMLCA1, pmlca);
+		mtpmr(PMRN_PMLCB1, pmlcb);
+		break;
 	case 2:
-		return mfpmr(PMRN_PMC2);
+		mtpmr(PMRN_PMLCA2, pmlca);
+		mtpmr(PMRN_PMLCB2, pmlcb);
+		break;
 	case 3:
-		return mfpmr(PMRN_PMC3);
+		mtpmr(PMRN_PMLCA3, pmlca);
+		mtpmr(PMRN_PMLCB3, pmlcb);
+		break;
 	default:
-		return 0;
+		panic("Bad ctr number!\n");
 	}
 }
 
-static inline void ctr_write(unsigned int i, unsigned int val)
+static void set_pmc_event(int ctr, int event)
 {
-	switch(i) {
-	case 0:
-		mtpmr(PMRN_PMC0, val);
-		break;
-	case 1:
-		mtpmr(PMRN_PMC1, val);
-		break;
-	case 2:
-		mtpmr(PMRN_PMC2, val);
-		break;
-	case 3:
-		mtpmr(PMRN_PMC3, val);
-		break;
-	default:
-		break;
-	}
+	u32 pmlca;
+
+	pmlca = get_pmlca(ctr);
+
+	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
+		((event << PMLCA_EVENT_SHIFT) &
+		 PMLCA_EVENT_MASK);
+
+	set_pmlca(ctr, pmlca);
 }
 
+static void set_pmc_user_kernel(int ctr, int user, int kernel)
+{
+	u32 pmlca;
+
+	pmlca = get_pmlca(ctr);
+
+	if(user)
+		pmlca &= ~PMLCA_FCU;
+	else
+		pmlca |= PMLCA_FCU;
+
+	if(kernel)
+		pmlca &= ~PMLCA_FCS;
+	else
+		pmlca |= PMLCA_FCS;
+
+	set_pmlca(ctr, pmlca);
+}
+
+static void set_pmc_marked(int ctr, int mark0, int mark1)
+{
+	u32 pmlca = get_pmlca(ctr);
+
+	if(mark0)
+		pmlca &= ~PMLCA_FCM0;
+	else
+		pmlca |= PMLCA_FCM0;
+
+	if(mark1)
+		pmlca &= ~PMLCA_FCM1;
+	else
+		pmlca |= PMLCA_FCM1;
+
+	set_pmlca(ctr, pmlca);
+}
+
+static void pmc_start_ctr(int ctr, int enable)
+{
+	u32 pmlca = get_pmlca(ctr);
+
+	pmlca &= ~PMLCA_FC;
+
+	if (enable)
+		pmlca |= PMLCA_CE;
+	else
+		pmlca &= ~PMLCA_CE;
+
+	set_pmlca(ctr, pmlca);
+}
+
+static void pmc_start_ctrs(int enable)
+{
+	u32 pmgc0 = mfpmr(PMRN_PMGC0);
+
+	pmgc0 &= ~PMGC0_FAC;
+	pmgc0 |= PMGC0_FCECE;
+
+	if (enable)
+		pmgc0 |= PMGC0_PMIE;
+	else
+		pmgc0 &= ~PMGC0_PMIE;
+
+	mtpmr(PMRN_PMGC0, pmgc0);
+}
+
+static void pmc_stop_ctrs(void)
+{
+	u32 pmgc0 = mfpmr(PMRN_PMGC0);
+
+	pmgc0 |= PMGC0_FAC;
+
+	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
+
+	mtpmr(PMRN_PMGC0, pmgc0);
+}
+
+static void dump_pmcs(void)
+{
+	printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
+	printk("pmc\t\tpmlca\t\tpmlcb\n");
+	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
+			mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
+	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
+			mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
+	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
+			mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
+	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
+			mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
+}
+
+static void fsl_booke_cpu_setup(struct op_counter_config *ctr)
+{
+	int i;
+
+	/* freeze all counters */
+	pmc_stop_ctrs();
+
+	for (i = 0;i < num_counters;i++) {
+		init_pmc_stop(i);
+
+		set_pmc_event(i, ctr[i].event);
+
+		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
+	}
+}
 
 static void fsl_booke_reg_setup(struct op_counter_config *ctr,
 			struct op_system_config *sys,
···
 
 	num_counters = num_ctrs;
 
-	/* freeze all counters */
-	pmc_stop_ctrs();
-
 	/* Our counters count up, and "count" refers to
 	 * how much before the next interrupt, and we interrupt
 	 * on overflow.  So we calculate the starting value
 	 * which will give us "count" until overflow.
 	 * Then we set the events on the enabled counters */
-	for (i = 0; i < num_counters; ++i) {
+	for (i = 0; i < num_counters; ++i)
 		reset_value[i] = 0x80000000UL - ctr[i].count;
 
-		init_pmc_stop(i);
-
-		set_pmc_event(i, ctr[i].event);
-
-		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
-	}
 }
 
 static void fsl_booke_start(struct op_counter_config *ctr)
···
 	for (i = 0; i < num_counters; ++i) {
 		if (ctr[i].enabled) {
 			ctr_write(i, reset_value[i]);
-			/* Set Each enabled counterd to only
-			 * count when the Mark bit is not set */
+			/* Set each enabled counter to only
+			 * count when the Mark bit is *not* set */
 			set_pmc_marked(i, 1, 0);
 			pmc_start_ctr(i, 1);
 		} else {
···
 
 struct op_powerpc_model op_model_fsl_booke = {
 	.reg_setup		= fsl_booke_reg_setup,
+	.cpu_setup		= fsl_booke_cpu_setup,
 	.start			= fsl_booke_start,
 	.stop			= fsl_booke_stop,
 	.handle_interrupt	= fsl_booke_handle_interrupt,
+1 -1
arch/powerpc/oprofile/op_model_power4.c
···
 	return 0;
 }
 
-static void power4_cpu_setup(void *unused)
+static void power4_cpu_setup(struct op_counter_config *ctr)
 {
 	unsigned int mmcr0 = mmcr0_val;
 	unsigned long mmcra = mmcra_val;
+1 -1
arch/powerpc/oprofile/op_model_rs64.c
···
 	/* XXX setup user and kernel profiling */
 }
 
-static void rs64_cpu_setup(void *unused)
+static void rs64_cpu_setup(struct op_counter_config *ctr)
 {
 	unsigned int mmcr0;
 
+2 -9
arch/powerpc/platforms/iseries/iommu.c
···
 	u64 rc;
 	u64 tce, rpn;
 
-	index <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
 	while (npages--) {
 		rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
 		tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
···
 static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
 {
 	u64 rc;
-
-	npages <<= TCE_PAGE_FACTOR;
-	index <<= TCE_PAGE_FACTOR;
 
 	while (npages--) {
 		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
···
 		panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
 
 	/* itc_size is in pages worth of table, it_size is in # of entries */
-	tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
-			TCE_ENTRY_SIZE) >> TCE_PAGE_FACTOR;
+	tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
 	tbl->it_busno = parms->itc_busno;
-	tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
+	tbl->it_offset = parms->itc_offset;
 	tbl->it_index = parms->itc_index;
 	tbl->it_blocksize = 1;
 	tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
+8 -27
arch/powerpc/platforms/pseries/iommu.c
···
 	u64 *tcep;
 	u64 rpn;
 
-	index <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
 	proto_tce = TCE_PCI_READ; // Read allowed
 
 	if (direction != DMA_TO_DEVICE)
···
 {
 	u64 *tcep;
 
-	npages <<= TCE_PAGE_FACTOR;
-	index <<= TCE_PAGE_FACTOR;
-
 	tcep = ((u64 *)tbl->it_base) + index;
 
 	while (npages--)
···
 {
 	u64 *tcep;
 
-	index <<= TCE_PAGE_FACTOR;
 	tcep = ((u64 *)tbl->it_base) + index;
 
 	return *tcep;
···
 	u64 rc;
 	u64 proto_tce, tce;
 	u64 rpn;
-
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
 
 	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	proto_tce = TCE_PCI_READ;
···
 	u64 rpn;
 	long l, limit;
 
-	if (TCE_PAGE_FACTOR == 0 && npages == 1)
+	if (npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
···
 					   uaddr, direction);
 		__get_cpu_var(tce_page) = tcep;
 	}
-
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
 
 	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	proto_tce = TCE_PCI_READ;
···
 {
 	u64 rc;
 
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
 	while (npages--) {
 		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
 
···
 {
 	u64 rc;
 
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
 	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
 
 	if (rc && printk_ratelimit()) {
···
 	u64 rc;
 	unsigned long tce_ret;
 
-	tcenum <<= TCE_PAGE_FACTOR;
 	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
 
 	if (rc && printk_ratelimit()) {
···
 	tbl->it_busno = phb->bus->number;
 
 	/* Units of tce entries */
-	tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT;
+	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
 
 	/* Test if we are going over 2GB of DMA space */
 	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
···
 	phb->dma_window_base_cur += phb->dma_window_size;
 
 	/* Set the tce table size - measured in entries */
-	tbl->it_size = phb->dma_window_size >> PAGE_SHIFT;
+	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
 
 	tbl->it_index = 0;
 	tbl->it_blocksize = 16;
···
 	tbl->it_base = 0;
 	tbl->it_blocksize = 16;
 	tbl->it_type = TCE_PCI;
-	tbl->it_offset = offset >> PAGE_SHIFT;
-	tbl->it_size = size >> PAGE_SHIFT;
+	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
+	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
 }
 
 static void iommu_bus_setup_pSeries(struct pci_bus *bus)
···
 	const void *dma_window = NULL;
 	struct pci_dn *pci;
 
-	DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));
-
 	/* dev setup for LPAR is a little tricky, since the device tree might
 	 * contain the dma-window properties per-device and not neccesarily
 	 * for the bus. So we need to search upwards in the tree until we
···
 	 * already allocated.
 	 */
 	dn = pci_device_to_OF_node(dev);
+
+	DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n",
+	    dev, pci_name(dev), dn->full_name);
 
 	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
 	     pdn = pdn->parent) {
-1
arch/powerpc/sysdev/dart.h
···
 
 #define DART_PAGE_SHIFT		12
 #define DART_PAGE_SIZE		(1 << DART_PAGE_SHIFT)
-#define DART_PAGE_FACTOR	(PAGE_SHIFT - DART_PAGE_SHIFT)
 
 
 #endif /* _POWERPC_SYSDEV_DART_H */
+1 -7
arch/powerpc/sysdev/dart_iommu.c
···
 
 	DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
 
-	index <<= DART_PAGE_FACTOR;
-	npages <<= DART_PAGE_FACTOR;
-
 	dp = ((unsigned int*)tbl->it_base) + index;
 
 	/* On U3, all memory is contigous, so we can move this
···
 	 */
 
 	DBG("dart: free at: %lx, %lx\n", index, npages);
-
-	index <<= DART_PAGE_FACTOR;
-	npages <<= DART_PAGE_FACTOR;
 
 	dp = ((unsigned int *)tbl->it_base) + index;
 
···
 	iommu_table_dart.it_busno = 0;
 	iommu_table_dart.it_offset = 0;
 	/* it_size is in number of entries */
-	iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
+	iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
 
 	/* Initialize the common IOMMU code */
 	iommu_table_dart.it_base = (unsigned long)dart_vbase;
+1 -2
arch/powerpc/sysdev/qe_lib/qe.c
···
 		mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
 	}
 
-	out_be32(&qe_immr->cp.cecdr,
-		 immrbar_virt_to_phys((void *)cmd_input));
+	out_be32(&qe_immr->cp.cecdr, cmd_input);
 	out_be32(&qe_immr->cp.cecr,
 		 (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
 		  mcn_protocol << mcn_shift));
+10 -8
arch/ppc/kernel/traps.c
···
 
 void alignment_exception(struct pt_regs *regs)
 {
-	int fixed;
+	int sig, code, fixed = 0;
 
 	fixed = fix_alignment(regs);
 	if (fixed == 1) {
···
 		return;
 	}
 	if (fixed == -EFAULT) {
-		/* fixed == -EFAULT means the operand address was bad */
-		if (user_mode(regs))
-			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
-		else
-			bad_page_fault(regs, regs->dar, SIGSEGV);
-		return;
+		sig = SIGSEGV;
+		code = SEGV_ACCERR;
+	} else {
+		sig = SIGBUS;
+		code = BUS_ADRALN;
 	}
-	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
+	if (user_mode(regs))
+		_exception(sig, regs, code, regs->dar);
+	else
+		bad_page_fault(regs, regs->dar, sig);
 }
 
 void StackOverflow(struct pt_regs *regs)
+22 -12
drivers/video/offb.c
···
 		out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
 		break;
 	case cmap_gxt2000:
-		out_le32((unsigned __iomem *) par->cmap_adr + regno,
+		out_le32(((unsigned __iomem *) par->cmap_adr) + regno,
 			 (red << 16 | green << 8 | blue));
 		break;
 	}
···
 			out_le32(par->cmap_adr + 0xb4, 0);
 			break;
 		case cmap_gxt2000:
-			out_le32((unsigned __iomem *) par->cmap_adr + i,
+			out_le32(((unsigned __iomem *) par->cmap_adr) + i,
 				 0);
 			break;
 		}
···
 static void __iomem *offb_map_reg(struct device_node *np, int index,
 				  unsigned long offset, unsigned long size)
 {
-	struct resource r;
+	const u32 *addrp;
+	u64 asize, taddr;
+	unsigned int flags;
 
-	if (of_address_to_resource(np, index, &r))
-		return 0;
-	if ((r.start + offset + size) > r.end)
-		return 0;
-	return ioremap(r.start + offset, size);
+	addrp = of_get_pci_address(np, index, &asize, &flags);
+	if (addrp == NULL)
+		addrp = of_get_address(np, index, &asize, &flags);
+	if (addrp == NULL)
+		return NULL;
+	if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
+		return NULL;
+	if ((offset + size) > asize)
+		return NULL;
+	taddr = of_translate_address(np, addrp);
+	if (taddr == OF_BAD_ADDR)
+		return NULL;
+	return ioremap(taddr + offset, size);
 }
 
 static void __init offb_init_fb(const char *name, const char *full_name,
···
 
 	par->cmap_type = cmap_unknown;
 	if (depth == 8) {
-		/* Palette hacks disabled for now */
 		if (dp && !strncmp(name, "ATY,Rage128", 11)) {
 			par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
 			if (par->cmap_adr)
···
 				ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
 			par->cmap_data = par->cmap_adr + 1;
 			par->cmap_type = cmap_m64;
-		} else if (dp && device_is_compatible(dp, "pci1014,b7")) {
+		} else if (dp && (device_is_compatible(dp, "pci1014,b7") ||
+				  device_is_compatible(dp, "pci1014,21c"))) {
 			par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
 			if (par->cmap_adr)
 				par->cmap_type = cmap_gxt2000;
···
 	pp = get_property(dp, "linux,bootx-linebytes", &len);
 	if (pp == NULL)
 		pp = get_property(dp, "linebytes", &len);
-	if (pp && len == sizeof(u32))
+	if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
 		pitch = *pp;
 	else
 		pitch = width * ((depth + 7) / 8);
···
 		offb_init_fb(no_real_node ? "bootx" : dp->name,
 			     no_real_node ? "display" : dp->full_name,
 			     width, height, depth, pitch, address,
-			     no_real_node ? dp : NULL);
+			     no_real_node ? NULL : dp);
 	}
 }
 
+11 -1
include/asm-powerpc/current.h
···
 #ifdef __powerpc64__
 #include <asm/paca.h>
 
-#define current		(get_paca()->__current)
+static inline struct task_struct *get_current(void)
+{
+	struct task_struct *task;
+
+	__asm__ __volatile__("ld %0,%1(13)"
+	: "=r" (task)
+	: "i" (offsetof(struct paca_struct, __current)));
+
+	return task;
+}
+#define current	get_current()
 
 #else
 
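The point of the current.h change: the old macro compiled to two dependent accesses (read the paca pointer out of r13, then load __current from it), and a preemption between them could migrate the task to another CPU, pairing the old CPU's paca with the new CPU's run state. Conceptually (a sketch, not compilable outside the kernel; local_paca is the r13 register variable from asm/paca.h):

	struct task_struct *old_current_expansion(void)
	{
		struct paca_struct *paca = local_paca;	/* step 1: read r13 */
		/* preempt + migrate possible here under CONFIG_PREEMPT */
		return paca->__current;		/* step 2: stale CPU's paca */
	}

The new get_current() folds both steps into a single "ld" from r13, which preemption cannot split.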
+5 -2
include/asm-powerpc/io.h
···
 
 static inline void mmiowb(void)
 {
-	__asm__ __volatile__ ("sync" : : : "memory");
-	get_paca()->io_sync = 0;
+	unsigned long tmp;
+
+	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
+	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
+	: "memory");
 }
 
 /*
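The mmiowb() problem was the same preemption race: a "sync" followed by a separate store to get_paca()->io_sync could clear the flag in another CPU's paca if the task migrated in between, so the new version does the sync and the store against the same r13 in one asm block. For reference, the driver pattern mmiowb() exists for (a hedged sketch; the lock, register and variable names are made up):

	spin_lock_irqsave(&dev_lock, flags);
	writel(cmd, dev_regs + CMD_REG);	/* posted MMIO write */
	mmiowb();	/* ensure it reaches the device in lock order */
	spin_unlock_irqrestore(&dev_lock, flags);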
+20 -2
include/asm-powerpc/iommu.h
···
 #define _ASM_IOMMU_H
 #ifdef __KERNEL__
 
-#include <asm/types.h>
+#include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <asm/types.h>
+#include <asm/bitops.h>
+
+#define IOMMU_PAGE_SHIFT	12
+#define IOMMU_PAGE_SIZE		(ASM_CONST(1) << IOMMU_PAGE_SHIFT)
+#define IOMMU_PAGE_MASK		(~((1 << IOMMU_PAGE_SHIFT) - 1))
+#define IOMMU_PAGE_ALIGN(addr)	_ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
+/* Pure 2^n version of get_order */
+static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
+{
+	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
+}
+
+#endif /* __ASSEMBLY__ */
+
 
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
  * of dma space we can get.  IOMAP_MAX_ORDER = 13
  * allows up to 2**12 pages (4096 * 4096) = 16 MB
  */
-#define IOMAP_MAX_ORDER 13
+#define IOMAP_MAX_ORDER		13
 
 struct iommu_table {
 	unsigned long  it_busno;	/* Bus number this table belongs to */
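get_iommu_order() is get_order() with the 4kB IOMMU page as the unit. A userspace sketch, with __ilog2 modeled on the PowerPC cntlzd semantics (cntlzd(0) == 64, so __ilog2(0) == -1 and a single page comes out as order 0):

	#include <stdio.h>

	#define IOMMU_PAGE_SHIFT 12

	static int ilog2_ul(unsigned long v)
	{
		if (v == 0)		/* matches cntlzd(0) == 64 -> -1 */
			return -1;
		return 8 * (int)sizeof(unsigned long) - 1 - __builtin_clzl(v);
	}

	static int get_iommu_order(unsigned long size)
	{
		return ilog2_ul((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
	}

	int main(void)
	{
		printf("%d\n", get_iommu_order(0x1000));	/* 4kB  -> order 0 */
		printf("%d\n", get_iommu_order(0x10000));	/* 64kB -> order 4 */
		return 0;
	}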
+85 -2
include/asm-powerpc/oprofile_impl.h
···
 	void (*reg_setup) (struct op_counter_config *,
 			   struct op_system_config *,
 			   int num_counters);
-	void (*cpu_setup) (void *);
+	void (*cpu_setup) (struct op_counter_config *);
 	void (*start) (struct op_counter_config *);
 	void (*stop) (void);
 	void (*handle_interrupt) (struct pt_regs *,
···
 		break;
 	}
 }
-#endif /* !CONFIG_FSL_BOOKE */
+#else /* CONFIG_FSL_BOOKE */
+static inline u32 get_pmlca(int ctr)
+{
+	u32 pmlca;
+
+	switch (ctr) {
+	case 0:
+		pmlca = mfpmr(PMRN_PMLCA0);
+		break;
+	case 1:
+		pmlca = mfpmr(PMRN_PMLCA1);
+		break;
+	case 2:
+		pmlca = mfpmr(PMRN_PMLCA2);
+		break;
+	case 3:
+		pmlca = mfpmr(PMRN_PMLCA3);
+		break;
+	default:
+		panic("Bad ctr number\n");
+	}
+
+	return pmlca;
+}
+
+static inline void set_pmlca(int ctr, u32 pmlca)
+{
+	switch (ctr) {
+	case 0:
+		mtpmr(PMRN_PMLCA0, pmlca);
+		break;
+	case 1:
+		mtpmr(PMRN_PMLCA1, pmlca);
+		break;
+	case 2:
+		mtpmr(PMRN_PMLCA2, pmlca);
+		break;
+	case 3:
+		mtpmr(PMRN_PMLCA3, pmlca);
+		break;
+	default:
+		panic("Bad ctr number\n");
+	}
+}
+
+static inline unsigned int ctr_read(unsigned int i)
+{
+	switch(i) {
+	case 0:
+		return mfpmr(PMRN_PMC0);
+	case 1:
+		return mfpmr(PMRN_PMC1);
+	case 2:
+		return mfpmr(PMRN_PMC2);
+	case 3:
+		return mfpmr(PMRN_PMC3);
+	default:
+		return 0;
+	}
+}
+
+static inline void ctr_write(unsigned int i, unsigned int val)
+{
+	switch(i) {
+	case 0:
+		mtpmr(PMRN_PMC0, val);
+		break;
+	case 1:
+		mtpmr(PMRN_PMC1, val);
+		break;
+	case 2:
+		mtpmr(PMRN_PMC2, val);
+		break;
+	case 3:
+		mtpmr(PMRN_PMC3, val);
+		break;
+	default:
+		break;
+	}
+}
+
+
+#endif /* CONFIG_FSL_BOOKE */
+
 
 extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-13
include/asm-powerpc/pmc.h
···
 void power4_enable_pmcs(void);
 #endif
 
-#ifdef CONFIG_FSL_BOOKE
-void init_pmc_stop(int ctr);
-void set_pmc_event(int ctr, int event);
-void set_pmc_user_kernel(int ctr, int user, int kernel);
-void set_pmc_marked(int ctr, int mark0, int mark1);
-void pmc_start_ctr(int ctr, int enable);
-void pmc_start_ctrs(int enable);
-void pmc_stop_ctrs(void);
-void dump_pmcs(void);
-
-extern struct op_powerpc_model op_model_fsl_booke;
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_PMC_H */
+3 -3
include/asm-powerpc/system.h
···
  *
  * We have to use the sync instructions for mb(), since lwsync doesn't
  * order loads with respect to previous stores.  Lwsync is fine for
- * rmb(), though. Note that lwsync is interpreted as sync by
- * 32-bit and older 64-bit CPUs.
+ * rmb(), though. Note that rmb() actually uses a sync on 32-bit
+ * architectures.
  *
  * For wmb(), we use sync since wmb is used in drivers to order
  * stores to system memory with respect to writes to the device.
···
  * SMP since it is only used to order updates to system memory.
  */
 #define mb()   __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
+#define rmb()  __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define read_barrier_depends()  do { } while(0)
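LWSYNC is the asm-level switch behind the rmb() change: it expands to lwsync where the sub-architecture implements it and to plain sync otherwise, which is what fixes e500 (e500 cores take an illegal-instruction exception on lwsync). Roughly how the macro is set up in asm-powerpc/synch.h from the same series:

	#ifdef __powerpc64__
	#define __SUBARCH_HAS_LWSYNC
	#endif

	#ifdef __SUBARCH_HAS_LWSYNC
	#    define LWSYNC	lwsync
	#else
	#    define LWSYNC	sync
	#endif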
+2 -1
include/asm-powerpc/tce.h
···
 #define _ASM_POWERPC_TCE_H
 #ifdef __KERNEL__
 
+#include <asm/iommu.h>
+
 /*
  * Tces come in two formats, one for the virtual bus and a different
  * format for PCI
···
 
 #define TCE_SHIFT	12
 #define TCE_PAGE_SIZE	(1 << TCE_SHIFT)
-#define TCE_PAGE_FACTOR	(PAGE_SHIFT - TCE_SHIFT)
 
 #define TCE_ENTRY_SIZE	8		/* each TCE is 64 bits */
 