Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'soc-fsl-next-v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers

NXP/FSL SoC driver updates for v5.13

- Add ACPI support for RCPM driver
- Use generic io{read,write} accessors for QE drivers, now that they have been
performance-optimized for PowerPC
- Fix QBMAN probe to cleanup HW states correctly for kexec
- Various cleanups and style fixes for QBMAN/QE/GUTS drivers

* tag 'soc-fsl-next-v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
soc: fsl: enable acpi support in RCPM driver
Revert "soc: fsl: qe: introduce qe_io{read,write}* wrappers"
tty: serial: ucc_uart: replace qe_io{read,write}* wrappers by generic io{read,write}*
soc: fsl: qe: replace qe_io{read,write}* wrappers by generic io{read,write}*
soc: fsl: guts: fix comment syntax in file
soc: fsl: guts: remove unneeded semicolon
soc: fsl: qe: Use DEFINE_SPINLOCK() for spinlock
soc: fsl: qbman: Delete useless kfree code
soc: fsl: qbman: Ensure device cleanup is run for kexec

Link: https://lore.kernel.org/r/20210409205719.27927-1-leoyang.li@nxp.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+198 -194
+1 -1
drivers/soc/fsl/guts.c
··· 117 117 if (matches->svr == (svr & matches->mask)) 118 118 return matches; 119 119 matches++; 120 - }; 120 + } 121 121 return NULL; 122 122 } 123 123
-1
drivers/soc/fsl/qbman/bman.c
··· 709 709 return pool; 710 710 err: 711 711 bm_release_bpid(bpid); 712 - kfree(pool); 713 712 return NULL; 714 713 } 715 714 EXPORT_SYMBOL(bman_new_pool);
+2 -1
drivers/soc/fsl/qbman/bman_portal.c
··· 160 160 __bman_portals_probed = 1; 161 161 /* unassigned portal, skip init */ 162 162 spin_unlock(&bman_lock); 163 - return 0; 163 + goto check_cleanup; 164 164 } 165 165 166 166 cpumask_set_cpu(cpu, &portal_cpus); ··· 176 176 if (!cpu_online(cpu)) 177 177 bman_offline_cpu(cpu); 178 178 179 + check_cleanup: 179 180 if (__bman_portals_probed == 1 && bman_requires_cleanup()) { 180 181 /* 181 182 * BMan wasn't reset prior to boot (Kexec for example)
+2 -1
drivers/soc/fsl/qbman/qman_portal.c
··· 302 302 __qman_portals_probed = 1; 303 303 /* unassigned portal, skip init */ 304 304 spin_unlock(&qman_lock); 305 - return 0; 305 + goto check_cleanup; 306 306 } 307 307 308 308 cpumask_set_cpu(cpu, &portal_cpus); ··· 323 323 if (!cpu_online(cpu)) 324 324 qman_offline_cpu(cpu); 325 325 326 + check_cleanup: 326 327 if (__qman_portals_probed == 1 && qman_requires_cleanup()) { 327 328 /* 328 329 * QMan wasn't reset prior to boot (Kexec for example)
+10 -10
drivers/soc/fsl/qe/gpio.c
··· 41 41 container_of(mm_gc, struct qe_gpio_chip, mm_gc); 42 42 struct qe_pio_regs __iomem *regs = mm_gc->regs; 43 43 44 - qe_gc->cpdata = qe_ioread32be(&regs->cpdata); 44 + qe_gc->cpdata = ioread32be(&regs->cpdata); 45 45 qe_gc->saved_regs.cpdata = qe_gc->cpdata; 46 - qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1); 47 - qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2); 48 - qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1); 49 - qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2); 50 - qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr); 46 + qe_gc->saved_regs.cpdir1 = ioread32be(&regs->cpdir1); 47 + qe_gc->saved_regs.cpdir2 = ioread32be(&regs->cpdir2); 48 + qe_gc->saved_regs.cppar1 = ioread32be(&regs->cppar1); 49 + qe_gc->saved_regs.cppar2 = ioread32be(&regs->cppar2); 50 + qe_gc->saved_regs.cpodr = ioread32be(&regs->cpodr); 51 51 } 52 52 53 53 static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) ··· 56 56 struct qe_pio_regs __iomem *regs = mm_gc->regs; 57 57 u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); 58 58 59 - return !!(qe_ioread32be(&regs->cpdata) & pin_mask); 59 + return !!(ioread32be(&regs->cpdata) & pin_mask); 60 60 } 61 61 62 62 static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) ··· 74 74 else 75 75 qe_gc->cpdata &= ~pin_mask; 76 76 77 - qe_iowrite32be(qe_gc->cpdata, &regs->cpdata); 77 + iowrite32be(qe_gc->cpdata, &regs->cpdata); 78 78 79 79 spin_unlock_irqrestore(&qe_gc->lock, flags); 80 80 } ··· 101 101 } 102 102 } 103 103 104 - qe_iowrite32be(qe_gc->cpdata, &regs->cpdata); 104 + iowrite32be(qe_gc->cpdata, &regs->cpdata); 105 105 106 106 spin_unlock_irqrestore(&qe_gc->lock, flags); 107 107 } ··· 269 269 else 270 270 qe_gc->cpdata &= ~mask1; 271 271 272 - qe_iowrite32be(qe_gc->cpdata, &regs->cpdata); 272 + iowrite32be(qe_gc->cpdata, &regs->cpdata); 273 273 qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1); 274 274 275 275 spin_unlock_irqrestore(&qe_gc->lock, flags);
+12 -12
drivers/soc/fsl/qe/qe.c
··· 109 109 110 110 spin_lock_irqsave(&qe_lock, flags); 111 111 if (cmd == QE_RESET) { 112 - qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr); 112 + iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr); 113 113 } else { 114 114 if (cmd == QE_ASSIGN_PAGE) { 115 115 /* Here device is the SNUM, not sub-block */ ··· 126 126 mcn_shift = QE_CR_MCN_NORMAL_SHIFT; 127 127 } 128 128 129 - qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr); 130 - qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift), 129 + iowrite32be(cmd_input, &qe_immr->cp.cecdr); 130 + iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift), 131 131 &qe_immr->cp.cecr); 132 132 } 133 133 134 134 /* wait for the QE_CR_FLG to clear */ 135 - ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val, 135 + ret = readx_poll_timeout_atomic(ioread32be, &qe_immr->cp.cecr, val, 136 136 (val & QE_CR_FLG) == 0, 0, 100); 137 137 /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. 
*/ 138 138 spin_unlock_irqrestore(&qe_lock, flags); ··· 231 231 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | 232 232 QE_BRGC_ENABLE | div16; 233 233 234 - qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]); 234 + iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]); 235 235 236 236 return 0; 237 237 } ··· 375 375 return -ENOMEM; 376 376 } 377 377 378 - qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, 378 + iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, 379 379 &sdma->sdebcr); 380 - qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)), 380 + iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)), 381 381 &sdma->sdmr); 382 382 383 383 return 0; ··· 416 416 "uploading microcode '%s'\n", ucode->id); 417 417 418 418 /* Use auto-increment */ 419 - qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR, 419 + iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR, 420 420 &qe_immr->iram.iadd); 421 421 422 422 for (i = 0; i < be32_to_cpu(ucode->count); i++) 423 - qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); 423 + iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata); 424 424 425 425 /* Set I-RAM Ready Register */ 426 - qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready); 426 + iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready); 427 427 } 428 428 429 429 /* ··· 542 542 u32 trap = be32_to_cpu(ucode->traps[j]); 543 543 544 544 if (trap) 545 - qe_iowrite32be(trap, 545 + iowrite32be(trap, 546 546 &qe_immr->rsp[i].tibcr[j]); 547 547 } 548 548 549 549 /* Enable traps */ 550 - qe_iowrite32be(be32_to_cpu(ucode->eccr), 550 + iowrite32be(be32_to_cpu(ucode->eccr), 551 551 &qe_immr->rsp[i].eccr); 552 552 } 553 553
+1 -2
drivers/soc/fsl/qe/qe_common.c
··· 26 26 #include <soc/fsl/qe/qe.h> 27 27 28 28 static struct gen_pool *muram_pool; 29 - static spinlock_t cpm_muram_lock; 29 + static DEFINE_SPINLOCK(cpm_muram_lock); 30 30 static void __iomem *muram_vbase; 31 31 static phys_addr_t muram_pbase; 32 32 ··· 54 54 if (muram_pbase) 55 55 return 0; 56 56 57 - spin_lock_init(&cpm_muram_lock); 58 57 np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data"); 59 58 if (!np) { 60 59 /* try legacy bindings */
+2 -2
drivers/soc/fsl/qe/qe_ic.c
··· 222 222 223 223 static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg) 224 224 { 225 - return qe_ioread32be(base + (reg >> 2)); 225 + return ioread32be(base + (reg >> 2)); 226 226 } 227 227 228 228 static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg, 229 229 u32 value) 230 230 { 231 - qe_iowrite32be(value, base + (reg >> 2)); 231 + iowrite32be(value, base + (reg >> 2)); 232 232 } 233 233 234 234 static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+18 -18
drivers/soc/fsl/qe/qe_io.c
··· 54 54 pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1))); 55 55 56 56 /* Set open drain, if required */ 57 - tmp_val = qe_ioread32be(&par_io->cpodr); 57 + tmp_val = ioread32be(&par_io->cpodr); 58 58 if (open_drain) 59 - qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr); 59 + iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr); 60 60 else 61 - qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr); 61 + iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr); 62 62 63 63 /* define direction */ 64 64 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? 65 - qe_ioread32be(&par_io->cpdir2) : 66 - qe_ioread32be(&par_io->cpdir1); 65 + ioread32be(&par_io->cpdir2) : 66 + ioread32be(&par_io->cpdir1); 67 67 68 68 /* get all bits mask for 2 bit per port */ 69 69 pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS - ··· 75 75 76 76 /* clear and set 2 bits mask */ 77 77 if (pin > (QE_PIO_PINS / 2) - 1) { 78 - qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2); 78 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2); 79 79 tmp_val &= ~pin_mask2bits; 80 - qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2); 80 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2); 81 81 } else { 82 - qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1); 82 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1); 83 83 tmp_val &= ~pin_mask2bits; 84 - qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1); 84 + iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1); 85 85 } 86 86 /* define pin assignment */ 87 87 tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? 
88 - qe_ioread32be(&par_io->cppar2) : 89 - qe_ioread32be(&par_io->cppar1); 88 + ioread32be(&par_io->cppar2) : 89 + ioread32be(&par_io->cppar1); 90 90 91 91 new_mask2bits = (u32) (assignment << (QE_PIO_PINS - 92 92 (pin % (QE_PIO_PINS / 2) + 1) * 2)); 93 93 /* clear and set 2 bits mask */ 94 94 if (pin > (QE_PIO_PINS / 2) - 1) { 95 - qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2); 95 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2); 96 96 tmp_val &= ~pin_mask2bits; 97 - qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2); 97 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2); 98 98 } else { 99 - qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1); 99 + iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1); 100 100 tmp_val &= ~pin_mask2bits; 101 - qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1); 101 + iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1); 102 102 } 103 103 } 104 104 EXPORT_SYMBOL(__par_io_config_pin); ··· 126 126 /* calculate pin location */ 127 127 pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin)); 128 128 129 - tmp_val = qe_ioread32be(&par_io[port].cpdata); 129 + tmp_val = ioread32be(&par_io[port].cpdata); 130 130 131 131 if (val == 0) /* clear */ 132 - qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata); 132 + iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata); 133 133 else /* set */ 134 - qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata); 134 + iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata); 135 135 136 136 return 0; 137 137 }
+34 -34
drivers/soc/fsl/qe/ucc_fast.c
··· 29 29 printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs); 30 30 31 31 printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n", 32 - &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr)); 32 + &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr)); 33 33 printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n", 34 - &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr)); 34 + &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr)); 35 35 printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n", 36 - &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr)); 36 + &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr)); 37 37 printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n", 38 - &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr)); 38 + &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr)); 39 39 printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n", 40 - &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce)); 40 + &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce)); 41 41 printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n", 42 - &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm)); 42 + &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm)); 43 43 printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n", 44 - &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs)); 44 + &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs)); 45 45 printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n", 46 - &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb)); 46 + &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb)); 47 47 printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n", 48 - &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs)); 48 + &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs)); 49 49 printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n", 50 - &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet)); 50 + &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet)); 51 51 printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n", 52 52 
&uccf->uf_regs->urfset, 53 - qe_ioread16be(&uccf->uf_regs->urfset)); 53 + ioread16be(&uccf->uf_regs->urfset)); 54 54 printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n", 55 - &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb)); 55 + &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb)); 56 56 printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n", 57 - &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs)); 57 + &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs)); 58 58 printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n", 59 - &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet)); 59 + &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet)); 60 60 printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n", 61 - &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt)); 61 + &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt)); 62 62 printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n", 63 - &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt)); 63 + &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt)); 64 64 printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n", 65 - &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry)); 65 + &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry)); 66 66 printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n", 67 - &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr)); 67 + &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr)); 68 68 } 69 69 EXPORT_SYMBOL(ucc_fast_dump_regs); 70 70 ··· 86 86 87 87 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf) 88 88 { 89 - qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr); 89 + iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr); 90 90 } 91 91 EXPORT_SYMBOL(ucc_fast_transmit_on_demand); 92 92 ··· 98 98 uf_regs = uccf->uf_regs; 99 99 100 100 /* Enable reception and/or transmission on this UCC. 
*/ 101 - gumr = qe_ioread32be(&uf_regs->gumr); 101 + gumr = ioread32be(&uf_regs->gumr); 102 102 if (mode & COMM_DIR_TX) { 103 103 gumr |= UCC_FAST_GUMR_ENT; 104 104 uccf->enabled_tx = 1; ··· 107 107 gumr |= UCC_FAST_GUMR_ENR; 108 108 uccf->enabled_rx = 1; 109 109 } 110 - qe_iowrite32be(gumr, &uf_regs->gumr); 110 + iowrite32be(gumr, &uf_regs->gumr); 111 111 } 112 112 EXPORT_SYMBOL(ucc_fast_enable); 113 113 ··· 119 119 uf_regs = uccf->uf_regs; 120 120 121 121 /* Disable reception and/or transmission on this UCC. */ 122 - gumr = qe_ioread32be(&uf_regs->gumr); 122 + gumr = ioread32be(&uf_regs->gumr); 123 123 if (mode & COMM_DIR_TX) { 124 124 gumr &= ~UCC_FAST_GUMR_ENT; 125 125 uccf->enabled_tx = 0; ··· 128 128 gumr &= ~UCC_FAST_GUMR_ENR; 129 129 uccf->enabled_rx = 0; 130 130 } 131 - qe_iowrite32be(gumr, &uf_regs->gumr); 131 + iowrite32be(gumr, &uf_regs->gumr); 132 132 } 133 133 EXPORT_SYMBOL(ucc_fast_disable); 134 134 ··· 262 262 gumr |= uf_info->tenc; 263 263 gumr |= uf_info->tcrc; 264 264 gumr |= uf_info->mode; 265 - qe_iowrite32be(gumr, &uf_regs->gumr); 265 + iowrite32be(gumr, &uf_regs->gumr); 266 266 267 267 /* Allocate memory for Tx Virtual Fifo */ 268 268 uccf->ucc_fast_tx_virtual_fifo_base_offset = ··· 287 287 } 288 288 289 289 /* Set Virtual Fifo registers */ 290 - qe_iowrite16be(uf_info->urfs, &uf_regs->urfs); 291 - qe_iowrite16be(uf_info->urfet, &uf_regs->urfet); 292 - qe_iowrite16be(uf_info->urfset, &uf_regs->urfset); 293 - qe_iowrite16be(uf_info->utfs, &uf_regs->utfs); 294 - qe_iowrite16be(uf_info->utfet, &uf_regs->utfet); 295 - qe_iowrite16be(uf_info->utftt, &uf_regs->utftt); 290 + iowrite16be(uf_info->urfs, &uf_regs->urfs); 291 + iowrite16be(uf_info->urfet, &uf_regs->urfet); 292 + iowrite16be(uf_info->urfset, &uf_regs->urfset); 293 + iowrite16be(uf_info->utfs, &uf_regs->utfs); 294 + iowrite16be(uf_info->utfet, &uf_regs->utfet); 295 + iowrite16be(uf_info->utftt, &uf_regs->utftt); 296 296 /* utfb, urfb are offsets from MURAM base */ 297 - 
qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, 297 + iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, 298 298 &uf_regs->utfb); 299 - qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, 299 + iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, 300 300 &uf_regs->urfb); 301 301 302 302 /* Mux clocking */ ··· 365 365 } 366 366 367 367 /* Set interrupt mask register at UCC level. */ 368 - qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); 368 + iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); 369 369 370 370 /* First, clear anything pending at UCC level, 371 371 * otherwise, old garbage may come through 372 372 * as soon as the dam is opened. */ 373 373 374 374 /* Writing '1' clears */ 375 - qe_iowrite32be(0xffffffff, &uf_regs->ucce); 375 + iowrite32be(0xffffffff, &uf_regs->ucce); 376 376 377 377 *uccf_ret = uccf; 378 378 return 0;
+21 -21
drivers/soc/fsl/qe/ucc_slow.c
··· 78 78 us_regs = uccs->us_regs; 79 79 80 80 /* Enable reception and/or transmission on this UCC. */ 81 - gumr_l = qe_ioread32be(&us_regs->gumr_l); 81 + gumr_l = ioread32be(&us_regs->gumr_l); 82 82 if (mode & COMM_DIR_TX) { 83 83 gumr_l |= UCC_SLOW_GUMR_L_ENT; 84 84 uccs->enabled_tx = 1; ··· 87 87 gumr_l |= UCC_SLOW_GUMR_L_ENR; 88 88 uccs->enabled_rx = 1; 89 89 } 90 - qe_iowrite32be(gumr_l, &us_regs->gumr_l); 90 + iowrite32be(gumr_l, &us_regs->gumr_l); 91 91 } 92 92 EXPORT_SYMBOL(ucc_slow_enable); 93 93 ··· 99 99 us_regs = uccs->us_regs; 100 100 101 101 /* Disable reception and/or transmission on this UCC. */ 102 - gumr_l = qe_ioread32be(&us_regs->gumr_l); 102 + gumr_l = ioread32be(&us_regs->gumr_l); 103 103 if (mode & COMM_DIR_TX) { 104 104 gumr_l &= ~UCC_SLOW_GUMR_L_ENT; 105 105 uccs->enabled_tx = 0; ··· 108 108 gumr_l &= ~UCC_SLOW_GUMR_L_ENR; 109 109 uccs->enabled_rx = 0; 110 110 } 111 - qe_iowrite32be(gumr_l, &us_regs->gumr_l); 111 + iowrite32be(gumr_l, &us_regs->gumr_l); 112 112 } 113 113 EXPORT_SYMBOL(ucc_slow_disable); 114 114 ··· 194 194 return ret; 195 195 } 196 196 197 - qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr); 197 + iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr); 198 198 199 199 INIT_LIST_HEAD(&uccs->confQ); 200 200 ··· 222 222 bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); 223 223 for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { 224 224 /* clear bd buffer */ 225 - qe_iowrite32be(0, &bd->buf); 225 + iowrite32be(0, &bd->buf); 226 226 /* set bd status and length */ 227 - qe_iowrite32be(0, (u32 __iomem *)bd); 227 + iowrite32be(0, (u32 __iomem *)bd); 228 228 bd++; 229 229 } 230 230 /* for last BD set Wrap bit */ 231 - qe_iowrite32be(0, &bd->buf); 232 - qe_iowrite32be(T_W, (u32 __iomem *)bd); 231 + iowrite32be(0, &bd->buf); 232 + iowrite32be(T_W, (u32 __iomem *)bd); 233 233 234 234 /* Init Rx bds */ 235 235 bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); 236 236 for (i = 0; i < 
us_info->rx_bd_ring_len - 1; i++) { 237 237 /* set bd status and length */ 238 - qe_iowrite32be(0, (u32 __iomem *)bd); 238 + iowrite32be(0, (u32 __iomem *)bd); 239 239 /* clear bd buffer */ 240 - qe_iowrite32be(0, &bd->buf); 240 + iowrite32be(0, &bd->buf); 241 241 bd++; 242 242 } 243 243 /* for last BD set Wrap bit */ 244 - qe_iowrite32be(R_W, (u32 __iomem *)bd); 245 - qe_iowrite32be(0, &bd->buf); 244 + iowrite32be(R_W, (u32 __iomem *)bd); 245 + iowrite32be(0, &bd->buf); 246 246 247 247 /* Set GUMR (For more details see the hardware spec.). */ 248 248 /* gumr_h */ ··· 263 263 gumr |= UCC_SLOW_GUMR_H_TXSY; 264 264 if (us_info->rtsm) 265 265 gumr |= UCC_SLOW_GUMR_H_RTSM; 266 - qe_iowrite32be(gumr, &us_regs->gumr_h); 266 + iowrite32be(gumr, &us_regs->gumr_h); 267 267 268 268 /* gumr_l */ 269 269 gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc | ··· 276 276 gumr |= UCC_SLOW_GUMR_L_TINV; 277 277 if (us_info->tend) 278 278 gumr |= UCC_SLOW_GUMR_L_TEND; 279 - qe_iowrite32be(gumr, &us_regs->gumr_l); 279 + iowrite32be(gumr, &us_regs->gumr_l); 280 280 281 281 /* Function code registers */ 282 282 283 283 /* if the data is in cachable memory, the 'global' */ 284 284 /* in the function code should be set. */ 285 - qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr); 286 - qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr); 285 + iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr); 286 + iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr); 287 287 288 288 /* rbase, tbase are offsets from MURAM base */ 289 - qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase); 290 - qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase); 289 + iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase); 290 + iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase); 291 291 292 292 /* Mux clocking */ 293 293 /* Grant Support */ ··· 317 317 } 318 318 319 319 /* Set interrupt mask register at UCC level. 
*/ 320 - qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm); 320 + iowrite16be(us_info->uccm_mask, &us_regs->uccm); 321 321 322 322 /* First, clear anything pending at UCC level, 323 323 * otherwise, old garbage may come through 324 324 * as soon as the dam is opened. */ 325 325 326 326 /* Writing '1' clears */ 327 - qe_iowrite16be(0xffff, &us_regs->ucce); 327 + iowrite16be(0xffff, &us_regs->ucce); 328 328 329 329 /* Issue QE Init command */ 330 330 if (us_info->init_tx && us_info->init_rx)
+22 -2
drivers/soc/fsl/rcpm.c
··· 13 13 #include <linux/slab.h> 14 14 #include <linux/suspend.h> 15 15 #include <linux/kernel.h> 16 + #include <linux/acpi.h> 16 17 17 18 #define RCPM_WAKEUP_CELL_MAX_SIZE 7 18 19 ··· 79 78 "fsl,rcpm-wakeup", value, 80 79 rcpm->wakeup_cells + 1); 81 80 82 - /* Wakeup source should refer to current rcpm device */ 83 - if (ret || (np->phandle != value[0])) 81 + if (ret) 84 82 continue; 83 + 84 + /* 85 + * For DT mode, would handle devices with "fsl,rcpm-wakeup" 86 + * pointing to the current RCPM node. 87 + * 88 + * For ACPI mode, currently we assume there is only one 89 + * RCPM controller existing. 90 + */ 91 + if (is_of_node(dev->fwnode)) 92 + if (np->phandle != value[0]) 93 + continue; 85 94 86 95 /* Property "#fsl,rcpm-wakeup-cells" of rcpm node defines the 87 96 * number of IPPDEXPCR register cells, and "fsl,rcpm-wakeup" ··· 183 172 }; 184 173 MODULE_DEVICE_TABLE(of, rcpm_of_match); 185 174 175 + #ifdef CONFIG_ACPI 176 + static const struct acpi_device_id rcpm_acpi_ids[] = { 177 + {"NXP0015",}, 178 + { } 179 + }; 180 + MODULE_DEVICE_TABLE(acpi, rcpm_acpi_ids); 181 + #endif 182 + 186 183 static struct platform_driver rcpm_driver = { 187 184 .driver = { 188 185 .name = "rcpm", 189 186 .of_match_table = rcpm_of_match, 187 + .acpi_match_table = ACPI_PTR(rcpm_acpi_ids), 190 188 .pm = &rcpm_pm_ops, 191 189 }, 192 190 .probe = rcpm_probe,
+62 -62
drivers/tty/serial/ucc_uart.c
··· 261 261 struct qe_bd *bdp = qe_port->tx_bd_base; 262 262 263 263 while (1) { 264 - if (qe_ioread16be(&bdp->status) & BD_SC_READY) 264 + if (ioread16be(&bdp->status) & BD_SC_READY) 265 265 /* This BD is not done, so return "not done" */ 266 266 return 0; 267 267 268 - if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) 268 + if (ioread16be(&bdp->status) & BD_SC_WRAP) 269 269 /* 270 270 * This BD is done and it's the last one, so return 271 271 * "done" ··· 344 344 p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); 345 345 346 346 *p++ = port->x_char; 347 - qe_iowrite16be(1, &bdp->length); 347 + iowrite16be(1, &bdp->length); 348 348 qe_setbits_be16(&bdp->status, BD_SC_READY); 349 349 /* Get next BD. */ 350 - if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) 350 + if (ioread16be(&bdp->status) & BD_SC_WRAP) 351 351 bdp = qe_port->tx_bd_base; 352 352 else 353 353 bdp++; ··· 366 366 /* Pick next descriptor and fill from buffer */ 367 367 bdp = qe_port->tx_cur; 368 368 369 - while (!(qe_ioread16be(&bdp->status) & BD_SC_READY) && 369 + while (!(ioread16be(&bdp->status) & BD_SC_READY) && 370 370 (xmit->tail != xmit->head)) { 371 371 count = 0; 372 372 p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); ··· 379 379 break; 380 380 } 381 381 382 - qe_iowrite16be(count, &bdp->length); 382 + iowrite16be(count, &bdp->length); 383 383 qe_setbits_be16(&bdp->status, BD_SC_READY); 384 384 385 385 /* Get next BD. 
*/ 386 - if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) 386 + if (ioread16be(&bdp->status) & BD_SC_WRAP) 387 387 bdp = qe_port->tx_bd_base; 388 388 else 389 389 bdp++; ··· 416 416 container_of(port, struct uart_qe_port, port); 417 417 418 418 /* If we currently are transmitting, then just return */ 419 - if (qe_ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX) 419 + if (ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX) 420 420 return; 421 421 422 422 /* Otherwise, pump the port and start transmission */ ··· 471 471 */ 472 472 bdp = qe_port->rx_cur; 473 473 while (1) { 474 - status = qe_ioread16be(&bdp->status); 474 + status = ioread16be(&bdp->status); 475 475 476 476 /* If this one is empty, then we assume we've read them all */ 477 477 if (status & BD_SC_EMPTY) 478 478 break; 479 479 480 480 /* get number of characters, and check space in RX buffer */ 481 - i = qe_ioread16be(&bdp->length); 481 + i = ioread16be(&bdp->length); 482 482 483 483 /* If we don't have enough room in RX buffer for the entire BD, 484 484 * then we try later, which will be the next RX interrupt. 
··· 512 512 qe_clrsetbits_be16(&bdp->status, 513 513 BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID, 514 514 BD_SC_EMPTY); 515 - if (qe_ioread16be(&bdp->status) & BD_SC_WRAP) 515 + if (ioread16be(&bdp->status) & BD_SC_WRAP) 516 516 bdp = qe_port->rx_bd_base; 517 517 else 518 518 bdp++; ··· 569 569 u16 events; 570 570 571 571 /* Clear the interrupts */ 572 - events = qe_ioread16be(&uccp->ucce); 573 - qe_iowrite16be(events, &uccp->ucce); 572 + events = ioread16be(&uccp->ucce); 573 + iowrite16be(events, &uccp->ucce); 574 574 575 575 if (events & UCC_UART_UCCE_BRKE) 576 576 uart_handle_break(&qe_port->port); ··· 601 601 bdp = qe_port->rx_bd_base; 602 602 qe_port->rx_cur = qe_port->rx_bd_base; 603 603 for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) { 604 - qe_iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); 605 - qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 606 - qe_iowrite16be(0, &bdp->length); 604 + iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); 605 + iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 606 + iowrite16be(0, &bdp->length); 607 607 bd_virt += qe_port->rx_fifosize; 608 608 bdp++; 609 609 } 610 610 611 611 /* */ 612 - qe_iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); 613 - qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 614 - qe_iowrite16be(0, &bdp->length); 612 + iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status); 613 + iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 614 + iowrite16be(0, &bdp->length); 615 615 616 616 /* Set the physical address of the host memory 617 617 * buffers in the buffer descriptors, and the ··· 622 622 qe_port->tx_cur = qe_port->tx_bd_base; 623 623 bdp = qe_port->tx_bd_base; 624 624 for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) { 625 - qe_iowrite16be(BD_SC_INTRPT, &bdp->status); 626 - qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 627 - qe_iowrite16be(0, &bdp->length); 625 + iowrite16be(BD_SC_INTRPT, &bdp->status); 626 + 
iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 627 + iowrite16be(0, &bdp->length); 628 628 bd_virt += qe_port->tx_fifosize; 629 629 bdp++; 630 630 } ··· 634 634 qe_setbits_be16(&qe_port->tx_cur->status, BD_SC_P); 635 635 #endif 636 636 637 - qe_iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status); 638 - qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 639 - qe_iowrite16be(0, &bdp->length); 637 + iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status); 638 + iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf); 639 + iowrite16be(0, &bdp->length); 640 640 } 641 641 642 642 /* ··· 658 658 ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX); 659 659 660 660 /* Program the UCC UART parameter RAM */ 661 - qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr); 662 - qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr); 663 - qe_iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr); 664 - qe_iowrite16be(0x10, &uccup->maxidl); 665 - qe_iowrite16be(1, &uccup->brkcr); 666 - qe_iowrite16be(0, &uccup->parec); 667 - qe_iowrite16be(0, &uccup->frmec); 668 - qe_iowrite16be(0, &uccup->nosec); 669 - qe_iowrite16be(0, &uccup->brkec); 670 - qe_iowrite16be(0, &uccup->uaddr[0]); 671 - qe_iowrite16be(0, &uccup->uaddr[1]); 672 - qe_iowrite16be(0, &uccup->toseq); 661 + iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr); 662 + iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr); 663 + iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr); 664 + iowrite16be(0x10, &uccup->maxidl); 665 + iowrite16be(1, &uccup->brkcr); 666 + iowrite16be(0, &uccup->parec); 667 + iowrite16be(0, &uccup->frmec); 668 + iowrite16be(0, &uccup->nosec); 669 + iowrite16be(0, &uccup->brkec); 670 + iowrite16be(0, &uccup->uaddr[0]); 671 + iowrite16be(0, &uccup->uaddr[1]); 672 + iowrite16be(0, &uccup->toseq); 673 673 for (i = 0; i < 8; i++) 674 - qe_iowrite16be(0xC000, &uccup->cchars[i]); 675 - qe_iowrite16be(0xc0ff, &uccup->rccm); 674 + iowrite16be(0xC000, 
&uccup->cchars[i]); 675 + iowrite16be(0xc0ff, &uccup->rccm); 676 676 677 677 /* Configure the GUMR registers for UART */ 678 678 if (soft_uart) { ··· 702 702 #endif 703 703 704 704 /* Disable rx interrupts and clear all pending events. */ 705 - qe_iowrite16be(0, &uccp->uccm); 706 - qe_iowrite16be(0xffff, &uccp->ucce); 707 - qe_iowrite16be(0x7e7e, &uccp->udsr); 705 + iowrite16be(0, &uccp->uccm); 706 + iowrite16be(0xffff, &uccp->ucce); 707 + iowrite16be(0x7e7e, &uccp->udsr); 708 708 709 709 /* Initialize UPSMR */ 710 - qe_iowrite16be(0, &uccp->upsmr); 710 + iowrite16be(0, &uccp->upsmr); 711 711 712 712 if (soft_uart) { 713 - qe_iowrite16be(0x30, &uccup->supsmr); 714 - qe_iowrite16be(0, &uccup->res92); 715 - qe_iowrite32be(0, &uccup->rx_state); 716 - qe_iowrite32be(0, &uccup->rx_cnt); 717 - qe_iowrite8(0, &uccup->rx_bitmark); 718 - qe_iowrite8(10, &uccup->rx_length); 719 - qe_iowrite32be(0x4000, &uccup->dump_ptr); 720 - qe_iowrite8(0, &uccup->rx_temp_dlst_qe); 721 - qe_iowrite32be(0, &uccup->rx_frame_rem); 722 - qe_iowrite8(0, &uccup->rx_frame_rem_size); 713 + iowrite16be(0x30, &uccup->supsmr); 714 + iowrite16be(0, &uccup->res92); 715 + iowrite32be(0, &uccup->rx_state); 716 + iowrite32be(0, &uccup->rx_cnt); 717 + iowrite8(0, &uccup->rx_bitmark); 718 + iowrite8(10, &uccup->rx_length); 719 + iowrite32be(0x4000, &uccup->dump_ptr); 720 + iowrite8(0, &uccup->rx_temp_dlst_qe); 721 + iowrite32be(0, &uccup->rx_frame_rem); 722 + iowrite8(0, &uccup->rx_frame_rem_size); 723 723 /* Soft-UART requires TX to be 1X */ 724 - qe_iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1, 724 + iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1, 725 725 &uccup->tx_mode); 726 - qe_iowrite16be(0, &uccup->tx_state); 727 - qe_iowrite8(0, &uccup->resD4); 728 - qe_iowrite16be(0, &uccup->resD5); 726 + iowrite16be(0, &uccup->tx_state); 727 + iowrite8(0, &uccup->resD4); 728 + iowrite16be(0, &uccup->resD5); 729 729 730 730 /* Set UART mode. 731 731 * Enable receive and transmit. 
··· 850 850 struct ucc_slow __iomem *uccp = qe_port->uccp; 851 851 unsigned int baud; 852 852 unsigned long flags; 853 - u16 upsmr = qe_ioread16be(&uccp->upsmr); 853 + u16 upsmr = ioread16be(&uccp->upsmr); 854 854 struct ucc_uart_pram __iomem *uccup = qe_port->uccup; 855 - u16 supsmr = qe_ioread16be(&uccup->supsmr); 855 + u16 supsmr = ioread16be(&uccup->supsmr); 856 856 u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */ 857 857 858 858 /* Character length programmed into the mode register is the ··· 950 950 /* Update the per-port timeout. */ 951 951 uart_update_timeout(port, termios->c_cflag, baud); 952 952 953 - qe_iowrite16be(upsmr, &uccp->upsmr); 953 + iowrite16be(upsmr, &uccp->upsmr); 954 954 if (soft_uart) { 955 - qe_iowrite16be(supsmr, &uccup->supsmr); 956 - qe_iowrite8(char_length, &uccup->rx_length); 955 + iowrite16be(supsmr, &uccup->supsmr); 956 + iowrite8(char_length, &uccup->rx_length); 957 957 958 958 /* Soft-UART requires a 1X multiplier for TX */ 959 959 qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
+2 -2
include/linux/fsl/guts.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 - /** 2 + /* 3 3 * Freecale 85xx and 86xx Global Utilties register set 4 4 * 5 5 * Authors: Jeff Brown ··· 14 14 #include <linux/types.h> 15 15 #include <linux/io.h> 16 16 17 - /** 17 + /* 18 18 * Global Utility Registers. 19 19 * 20 20 * Not all registers defined in this structure are available on all chips, so
+9 -25
include/soc/fsl/qe/qe.h
··· 239 239 #define qe_muram_dma cpm_muram_dma 240 240 #define qe_muram_free_addr cpm_muram_free_addr 241 241 242 - #ifdef CONFIG_PPC32 243 - #define qe_iowrite8(val, addr) out_8(addr, val) 244 - #define qe_iowrite16be(val, addr) out_be16(addr, val) 245 - #define qe_iowrite32be(val, addr) out_be32(addr, val) 246 - #define qe_ioread8(addr) in_8(addr) 247 - #define qe_ioread16be(addr) in_be16(addr) 248 - #define qe_ioread32be(addr) in_be32(addr) 249 - #else 250 - #define qe_iowrite8(val, addr) iowrite8(val, addr) 251 - #define qe_iowrite16be(val, addr) iowrite16be(val, addr) 252 - #define qe_iowrite32be(val, addr) iowrite32be(val, addr) 253 - #define qe_ioread8(addr) ioread8(addr) 254 - #define qe_ioread16be(addr) ioread16be(addr) 255 - #define qe_ioread32be(addr) ioread32be(addr) 256 - #endif 242 + #define qe_setbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr)) 243 + #define qe_clrbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr)) 257 244 258 - #define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) | (_v), (_addr)) 259 - #define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr)) 245 + #define qe_setbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr)) 246 + #define qe_clrbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr)) 260 247 261 - #define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) | (_v), (_addr)) 262 - #define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr)) 263 - 264 - #define qe_setbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) | (_v), (_addr)) 265 - #define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr)) 248 + #define qe_setbits_8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr)) 249 + #define qe_clrbits_8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr)) 266 250 267 251 #define qe_clrsetbits_be32(addr, clear, set) \ 268 - qe_iowrite32be((qe_ioread32be(addr) & 
~(clear)) | (set), (addr)) 252 + iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr)) 269 253 #define qe_clrsetbits_be16(addr, clear, set) \ 270 - qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr)) 254 + iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr)) 271 255 #define qe_clrsetbits_8(addr, clear, set) \ 272 - qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr)) 256 + iowrite8((ioread8(addr) & ~(clear)) | (set), (addr)) 273 257 274 258 /* Structure that defines QE firmware binary files. 275 259 *