Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'soc-fsl-next-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers

NXP/FSL SoC driver updates for v5.3

DPAA2 Console driver
- Add driver to export two char devices to dump logs for MC and
AIOP

DPAA2 DPIO driver
- Add support for memory backed QBMan portals
- Increase the timeout period to prevent false errors
- Add APIs to retrieve QBMan portal probing status

DPAA Qman driver
- Only make liodn fixup on powerpc SoCs with PAMU iommu

* tag 'soc-fsl-next-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
soc: fsl: qbman_portals: add APIs to retrieve the probing status
soc: fsl: qman: fixup liodns only on ppc targets
soc: fsl: dpio: Add support for memory backed QBMan portals
bus: mc-bus: Add support for mapping shareable portals
soc: fsl: dpio: Increase timeout for QBMan Management Commands
soc: fsl: add DPAA2 console support
Documentation: DT: Add entry for DPAA2 console
soc: fsl: guts: Add definition for LX2160A

Signed-off-by: Olof Johansson <olof@lixom.net>

+618 -51
+11
Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
··· 1 + DPAA2 console support 2 + 3 + Required properties: 4 + 5 + - compatible 6 + Value type: <string> 7 + Definition: Must be "fsl,dpaa2-console". 8 + - reg 9 + Value type: <prop-encoded-array> 10 + Definition: A standard property. Specifies the region where the MCFBA 11 + (MC firmware base address) register can be found.
+1
MAINTAINERS
··· 6416 6416 L: linuxppc-dev@lists.ozlabs.org 6417 6417 L: linux-arm-kernel@lists.infradead.org 6418 6418 S: Maintained 6419 + F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt 6419 6420 F: Documentation/devicetree/bindings/soc/fsl/ 6420 6421 F: drivers/soc/fsl/ 6421 6422 F: include/linux/fsl/
+27 -3
drivers/bus/fsl-mc/dprc.c
··· 443 443 struct fsl_mc_command cmd = { 0 }; 444 444 struct dprc_cmd_get_obj_region *cmd_params; 445 445 struct dprc_rsp_get_obj_region *rsp_params; 446 + u16 major_ver, minor_ver; 446 447 int err; 447 448 448 449 /* prepare command */ 449 - cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, 450 - cmd_flags, token); 450 + err = dprc_get_api_version(mc_io, 0, 451 + &major_ver, 452 + &minor_ver); 453 + if (err) 454 + return err; 455 + 456 + /** 457 + * MC API version 6.3 introduced a new field to the region 458 + * descriptor: base_address. If the older API is in use then the base 459 + * address is set to zero to indicate it needs to be obtained elsewhere 460 + * (typically the device tree). 461 + */ 462 + if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3)) 463 + cmd.header = 464 + mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2, 465 + cmd_flags, token); 466 + else 467 + cmd.header = 468 + mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, 469 + cmd_flags, token); 470 + 451 471 cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params; 452 472 cmd_params->obj_id = cpu_to_le32(obj_id); 453 473 cmd_params->region_index = region_index; ··· 481 461 482 462 /* retrieve response parameters */ 483 463 rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params; 484 - region_desc->base_offset = le64_to_cpu(rsp_params->base_addr); 464 + region_desc->base_offset = le64_to_cpu(rsp_params->base_offset); 485 465 region_desc->size = le32_to_cpu(rsp_params->size); 466 + if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3)) 467 + region_desc->base_address = le64_to_cpu(rsp_params->base_addr); 468 + else 469 + region_desc->base_address = 0; 486 470 487 471 return 0; 488 472 }
+13 -2
drivers/bus/fsl-mc/fsl-mc-bus.c
··· 487 487 "dprc_get_obj_region() failed: %d\n", error); 488 488 goto error_cleanup_regions; 489 489 } 490 - 491 - error = translate_mc_addr(mc_dev, mc_region_type, 490 + /* 491 + * Older MC only returned region offset and no base address 492 + * If base address is in the region_desc use it otherwise 493 + * revert to old mechanism 494 + */ 495 + if (region_desc.base_address) 496 + regions[i].start = region_desc.base_address + 497 + region_desc.base_offset; 498 + else 499 + error = translate_mc_addr(mc_dev, mc_region_type, 492 500 region_desc.base_offset, 493 501 &regions[i].start); 502 + 494 503 if (error < 0) { 495 504 dev_err(parent_dev, 496 505 "Invalid MC offset: %#x (for %s.%d\'s region %d)\n", ··· 513 504 regions[i].flags = IORESOURCE_IO; 514 505 if (region_desc.flags & DPRC_REGION_CACHEABLE) 515 506 regions[i].flags |= IORESOURCE_CACHEABLE; 507 + if (region_desc.flags & DPRC_REGION_SHAREABLE) 508 + regions[i].flags |= IORESOURCE_MEM; 516 509 } 517 510 518 511 mc_dev->regions = regions;
+15 -2
drivers/bus/fsl-mc/fsl-mc-private.h
··· 79 79 80 80 /* DPRC command versioning */ 81 81 #define DPRC_CMD_BASE_VERSION 1 82 + #define DPRC_CMD_2ND_VERSION 2 82 83 #define DPRC_CMD_ID_OFFSET 4 83 84 84 85 #define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION) 86 + #define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION) 85 87 86 88 /* DPRC command IDs */ 87 89 #define DPRC_CMDID_CLOSE DPRC_CMD(0x800) ··· 102 100 #define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159) 103 101 #define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A) 104 102 #define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E) 103 + #define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E) 105 104 #define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F) 106 105 107 106 struct dprc_cmd_open { ··· 202 199 /* response word 0 */ 203 200 __le64 pad; 204 201 /* response word 1 */ 205 - __le64 base_addr; 202 + __le64 base_offset; 206 203 /* response word 2 */ 207 204 __le32 size; 205 + __le32 pad2; 206 + /* response word 3 */ 207 + __le32 flags; 208 + __le32 pad3; 209 + /* response word 4 */ 210 + /* base_addr may be zero if older MC firmware is used */ 211 + __le64 base_addr; 208 212 }; 209 213 210 214 struct dprc_cmd_set_obj_irq { ··· 344 334 /* Region flags */ 345 335 /* Cacheable - Indicates that region should be mapped as cacheable */ 346 336 #define DPRC_REGION_CACHEABLE 0x00000001 337 + #define DPRC_REGION_SHAREABLE 0x00000002 347 338 348 339 /** 349 340 * enum dprc_region_type - Region type ··· 353 342 */ 354 343 enum dprc_region_type { 355 344 DPRC_REGION_TYPE_MC_PORTAL, 356 - DPRC_REGION_TYPE_QBMAN_PORTAL 345 + DPRC_REGION_TYPE_QBMAN_PORTAL, 346 + DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL 357 347 }; 358 348 359 349 /** ··· 372 360 u32 size; 373 361 u32 flags; 374 362 enum dprc_region_type type; 363 + u64 base_address; 375 364 }; 376 365 377 366 int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+10
drivers/soc/fsl/Kconfig
··· 30 30 other DPAA2 objects. This driver does not expose the DPIO 31 31 objects individually, but groups them under a service layer 32 32 API. 33 + 34 + config DPAA2_CONSOLE 35 + tristate "QorIQ DPAA2 console driver" 36 + depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST) 37 + default y 38 + help 39 + Console driver for DPAA2 platforms. Exports 2 char devices, 40 + /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console, 41 + which can be used to dump the Management Complex and AIOP 42 + firmware logs. 33 43 endmenu
+1
drivers/soc/fsl/Makefile
··· 8 8 obj-$(CONFIG_CPM) += qe/ 9 9 obj-$(CONFIG_FSL_GUTS) += guts.o 10 10 obj-$(CONFIG_FSL_MC_DPIO) += dpio/ 11 + obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
+329
drivers/soc/fsl/dpaa2-console.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 + /* 3 + * Freescale DPAA2 Platforms Console Driver 4 + * 5 + * Copyright 2015-2016 Freescale Semiconductor Inc. 6 + * Copyright 2018 NXP 7 + */ 8 + 9 + #define pr_fmt(fmt) "dpaa2-console: " fmt 10 + 11 + #include <linux/module.h> 12 + #include <linux/of_device.h> 13 + #include <linux/of_address.h> 14 + #include <linux/miscdevice.h> 15 + #include <linux/uaccess.h> 16 + #include <linux/slab.h> 17 + #include <linux/fs.h> 18 + #include <linux/io.h> 19 + 20 + /* MC firmware base low/high registers indexes */ 21 + #define MCFBALR_OFFSET 0 22 + #define MCFBAHR_OFFSET 1 23 + 24 + /* Bit masks used to get the most/least significant part of the MC base addr */ 25 + #define MC_FW_ADDR_MASK_HIGH 0x1FFFF 26 + #define MC_FW_ADDR_MASK_LOW 0xE0000000 27 + 28 + #define MC_BUFFER_OFFSET 0x01000000 29 + #define MC_BUFFER_SIZE (1024 * 1024 * 16) 30 + #define MC_OFFSET_DELTA MC_BUFFER_OFFSET 31 + 32 + #define AIOP_BUFFER_OFFSET 0x06000000 33 + #define AIOP_BUFFER_SIZE (1024 * 1024 * 16) 34 + #define AIOP_OFFSET_DELTA 0 35 + 36 + #define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000 37 + #define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND)) 38 + 39 + /* MC and AIOP Magic words */ 40 + #define MAGIC_MC 0x4d430100 41 + #define MAGIC_AIOP 0x41494F50 42 + 43 + struct log_header { 44 + __le32 magic_word; 45 + char reserved[4]; 46 + __le32 buf_start; 47 + __le32 buf_length; 48 + __le32 last_byte; 49 + }; 50 + 51 + struct console_data { 52 + void __iomem *map_addr; 53 + struct log_header __iomem *hdr; 54 + void __iomem *start_addr; 55 + void __iomem *end_addr; 56 + void __iomem *end_of_data; 57 + void __iomem *cur_ptr; 58 + }; 59 + 60 + static struct resource mc_base_addr; 61 + 62 + static inline void adjust_end(struct console_data *cd) 63 + { 64 + u32 last_byte = readl(&cd->hdr->last_byte); 65 + 66 + cd->end_of_data = cd->start_addr + LAST_BYTE(last_byte); 67 + } 68 + 69 + static u64 get_mc_fw_base_address(void) 70 + 
{ 71 + u64 mcfwbase = 0ULL; 72 + u32 __iomem *mcfbaregs; 73 + 74 + mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr)); 75 + if (!mcfbaregs) { 76 + pr_err("could not map MC Firmaware Base registers\n"); 77 + return 0; 78 + } 79 + 80 + mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & 81 + MC_FW_ADDR_MASK_HIGH; 82 + mcfwbase <<= 32; 83 + mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW; 84 + iounmap(mcfbaregs); 85 + 86 + pr_debug("MC base address at 0x%016llx\n", mcfwbase); 87 + return mcfwbase; 88 + } 89 + 90 + static ssize_t dpaa2_console_size(struct console_data *cd) 91 + { 92 + ssize_t size; 93 + 94 + if (cd->cur_ptr <= cd->end_of_data) 95 + size = cd->end_of_data - cd->cur_ptr; 96 + else 97 + size = (cd->end_addr - cd->cur_ptr) + 98 + (cd->end_of_data - cd->start_addr); 99 + 100 + return size; 101 + } 102 + 103 + static int dpaa2_generic_console_open(struct inode *node, struct file *fp, 104 + u64 offset, u64 size, 105 + u32 expected_magic, 106 + u32 offset_delta) 107 + { 108 + u32 read_magic, wrapped, last_byte, buf_start, buf_length; 109 + struct console_data *cd; 110 + u64 base_addr; 111 + int err; 112 + 113 + cd = kmalloc(sizeof(*cd), GFP_KERNEL); 114 + if (!cd) 115 + return -ENOMEM; 116 + 117 + base_addr = get_mc_fw_base_address(); 118 + if (!base_addr) { 119 + err = -EIO; 120 + goto err_fwba; 121 + } 122 + 123 + cd->map_addr = ioremap(base_addr + offset, size); 124 + if (!cd->map_addr) { 125 + pr_err("cannot map console log memory\n"); 126 + err = -EIO; 127 + goto err_ioremap; 128 + } 129 + 130 + cd->hdr = (struct log_header __iomem *)cd->map_addr; 131 + read_magic = readl(&cd->hdr->magic_word); 132 + last_byte = readl(&cd->hdr->last_byte); 133 + buf_start = readl(&cd->hdr->buf_start); 134 + buf_length = readl(&cd->hdr->buf_length); 135 + 136 + if (read_magic != expected_magic) { 137 + pr_warn("expected = %08x, read = %08x\n", 138 + expected_magic, read_magic); 139 + err = -EIO; 140 + goto err_magic; 141 + } 142 + 143 + 
cd->start_addr = cd->map_addr + buf_start - offset_delta; 144 + cd->end_addr = cd->start_addr + buf_length; 145 + 146 + wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND; 147 + 148 + adjust_end(cd); 149 + if (wrapped && cd->end_of_data != cd->end_addr) 150 + cd->cur_ptr = cd->end_of_data + 1; 151 + else 152 + cd->cur_ptr = cd->start_addr; 153 + 154 + fp->private_data = cd; 155 + 156 + return 0; 157 + 158 + err_magic: 159 + iounmap(cd->map_addr); 160 + 161 + err_ioremap: 162 + err_fwba: 163 + kfree(cd); 164 + 165 + return err; 166 + } 167 + 168 + static int dpaa2_mc_console_open(struct inode *node, struct file *fp) 169 + { 170 + return dpaa2_generic_console_open(node, fp, 171 + MC_BUFFER_OFFSET, MC_BUFFER_SIZE, 172 + MAGIC_MC, MC_OFFSET_DELTA); 173 + } 174 + 175 + static int dpaa2_aiop_console_open(struct inode *node, struct file *fp) 176 + { 177 + return dpaa2_generic_console_open(node, fp, 178 + AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE, 179 + MAGIC_AIOP, AIOP_OFFSET_DELTA); 180 + } 181 + 182 + static int dpaa2_console_close(struct inode *node, struct file *fp) 183 + { 184 + struct console_data *cd = fp->private_data; 185 + 186 + iounmap(cd->map_addr); 187 + kfree(cd); 188 + return 0; 189 + } 190 + 191 + static ssize_t dpaa2_console_read(struct file *fp, char __user *buf, 192 + size_t count, loff_t *f_pos) 193 + { 194 + struct console_data *cd = fp->private_data; 195 + size_t bytes = dpaa2_console_size(cd); 196 + size_t bytes_end = cd->end_addr - cd->cur_ptr; 197 + size_t written = 0; 198 + void *kbuf; 199 + int err; 200 + 201 + /* Check if we need to adjust the end of data addr */ 202 + adjust_end(cd); 203 + 204 + if (cd->end_of_data == cd->cur_ptr) 205 + return 0; 206 + 207 + if (count < bytes) 208 + bytes = count; 209 + 210 + kbuf = kmalloc(bytes, GFP_KERNEL); 211 + if (!kbuf) 212 + return -ENOMEM; 213 + 214 + if (bytes > bytes_end) { 215 + memcpy_fromio(kbuf, cd->cur_ptr, bytes_end); 216 + if (copy_to_user(buf, kbuf, bytes_end)) { 217 + err = -EFAULT; 218 + 
goto err_free_buf; 219 + } 220 + buf += bytes_end; 221 + cd->cur_ptr = cd->start_addr; 222 + bytes -= bytes_end; 223 + written += bytes_end; 224 + } 225 + 226 + memcpy_fromio(kbuf, cd->cur_ptr, bytes); 227 + if (copy_to_user(buf, kbuf, bytes)) { 228 + err = -EFAULT; 229 + goto err_free_buf; 230 + } 231 + cd->cur_ptr += bytes; 232 + written += bytes; 233 + 234 + return written; 235 + 236 + err_free_buf: 237 + kfree(kbuf); 238 + 239 + return err; 240 + } 241 + 242 + static const struct file_operations dpaa2_mc_console_fops = { 243 + .owner = THIS_MODULE, 244 + .open = dpaa2_mc_console_open, 245 + .release = dpaa2_console_close, 246 + .read = dpaa2_console_read, 247 + }; 248 + 249 + static struct miscdevice dpaa2_mc_console_dev = { 250 + .minor = MISC_DYNAMIC_MINOR, 251 + .name = "dpaa2_mc_console", 252 + .fops = &dpaa2_mc_console_fops 253 + }; 254 + 255 + static const struct file_operations dpaa2_aiop_console_fops = { 256 + .owner = THIS_MODULE, 257 + .open = dpaa2_aiop_console_open, 258 + .release = dpaa2_console_close, 259 + .read = dpaa2_console_read, 260 + }; 261 + 262 + static struct miscdevice dpaa2_aiop_console_dev = { 263 + .minor = MISC_DYNAMIC_MINOR, 264 + .name = "dpaa2_aiop_console", 265 + .fops = &dpaa2_aiop_console_fops 266 + }; 267 + 268 + static int dpaa2_console_probe(struct platform_device *pdev) 269 + { 270 + int error; 271 + 272 + error = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr); 273 + if (error < 0) { 274 + pr_err("of_address_to_resource() failed for %pOF with %d\n", 275 + pdev->dev.of_node, error); 276 + return error; 277 + } 278 + 279 + error = misc_register(&dpaa2_mc_console_dev); 280 + if (error) { 281 + pr_err("cannot register device %s\n", 282 + dpaa2_mc_console_dev.name); 283 + goto err_register_mc; 284 + } 285 + 286 + error = misc_register(&dpaa2_aiop_console_dev); 287 + if (error) { 288 + pr_err("cannot register device %s\n", 289 + dpaa2_aiop_console_dev.name); 290 + goto err_register_aiop; 291 + } 292 + 293 + return 
0; 294 + 295 + err_register_aiop: 296 + misc_deregister(&dpaa2_mc_console_dev); 297 + err_register_mc: 298 + return error; 299 + } 300 + 301 + static int dpaa2_console_remove(struct platform_device *pdev) 302 + { 303 + misc_deregister(&dpaa2_mc_console_dev); 304 + misc_deregister(&dpaa2_aiop_console_dev); 305 + 306 + return 0; 307 + } 308 + 309 + static const struct of_device_id dpaa2_console_match_table[] = { 310 + { .compatible = "fsl,dpaa2-console",}, 311 + {}, 312 + }; 313 + 314 + MODULE_DEVICE_TABLE(of, dpaa2_console_match_table); 315 + 316 + static struct platform_driver dpaa2_console_driver = { 317 + .driver = { 318 + .name = "dpaa2-console", 319 + .pm = NULL, 320 + .of_match_table = dpaa2_console_match_table, 321 + }, 322 + .probe = dpaa2_console_probe, 323 + .remove = dpaa2_console_remove, 324 + }; 325 + module_platform_driver(dpaa2_console_driver); 326 + 327 + MODULE_LICENSE("Dual BSD/GPL"); 328 + MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>"); 329 + MODULE_DESCRIPTION("DPAA2 console driver");
+16 -7
drivers/soc/fsl/dpio/dpio-driver.c
··· 197 197 desc.cpu); 198 198 } 199 199 200 - /* 201 - * Set the CENA regs to be the cache inhibited area of the portal to 202 - * avoid coherency issues if a user migrates to another core. 203 - */ 204 - desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start, 205 - resource_size(&dpio_dev->regions[1]), 206 - MEMREMAP_WC); 200 + if (dpio_dev->obj_desc.region_count < 3) { 201 + /* No support for DDR backed portals, use classic mapping */ 202 + /* 203 + * Set the CENA regs to be the cache inhibited area of the 204 + * portal to avoid coherency issues if a user migrates to 205 + * another core. 206 + */ 207 + desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start, 208 + resource_size(&dpio_dev->regions[1]), 209 + MEMREMAP_WC); 210 + } else { 211 + desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start, 212 + resource_size(&dpio_dev->regions[2]), 213 + MEMREMAP_WB); 214 + } 215 + 207 216 if (IS_ERR(desc.regs_cena)) { 208 217 dev_err(dev, "devm_memremap failed\n"); 209 218 err = PTR_ERR(desc.regs_cena);
+123 -25
drivers/soc/fsl/dpio/qbman-portal.c
··· 15 15 #define QMAN_REV_4000 0x04000000 16 16 #define QMAN_REV_4100 0x04010000 17 17 #define QMAN_REV_4101 0x04010001 18 + #define QMAN_REV_5000 0x05000000 19 + 18 20 #define QMAN_REV_MASK 0xffff0000 19 21 20 22 /* All QBMan command and result structures use this "valid bit" encoding */ ··· 27 25 #define QBMAN_WQCHAN_CONFIGURE 0x46 28 26 29 27 /* CINH register offsets */ 28 + #define QBMAN_CINH_SWP_EQCR_PI 0x800 30 29 #define QBMAN_CINH_SWP_EQAR 0x8c0 30 + #define QBMAN_CINH_SWP_CR_RT 0x900 31 + #define QBMAN_CINH_SWP_VDQCR_RT 0x940 32 + #define QBMAN_CINH_SWP_EQCR_AM_RT 0x980 33 + #define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0 31 34 #define QBMAN_CINH_SWP_DQPI 0xa00 32 35 #define QBMAN_CINH_SWP_DCAP 0xac0 33 36 #define QBMAN_CINH_SWP_SDQCR 0xb00 37 + #define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40 38 + #define QBMAN_CINH_SWP_RCR_PI 0xc00 34 39 #define QBMAN_CINH_SWP_RAR 0xcc0 35 40 #define QBMAN_CINH_SWP_ISR 0xe00 36 41 #define QBMAN_CINH_SWP_IER 0xe40 ··· 51 42 #define QBMAN_CENA_SWP_CR 0x600 52 43 #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1)) 53 44 #define QBMAN_CENA_SWP_VDQCR 0x780 45 + 46 + /* CENA register offsets in memory-backed mode */ 47 + #define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6)) 48 + #define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6)) 49 + #define QBMAN_CENA_SWP_CR_MEM 0x1600 50 + #define QBMAN_CENA_SWP_RR_MEM 0x1680 51 + #define QBMAN_CENA_SWP_VDQCR_MEM 0x1780 54 52 55 53 /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ 56 54 #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6) ··· 112 96 113 97 #define SWP_CFG_DQRR_MF_SHIFT 20 114 98 #define SWP_CFG_EST_SHIFT 16 99 + #define SWP_CFG_CPBS_SHIFT 15 115 100 #define SWP_CFG_WN_SHIFT 14 116 101 #define SWP_CFG_RPM_SHIFT 12 117 102 #define SWP_CFG_DCM_SHIFT 10 118 103 #define SWP_CFG_EPM_SHIFT 8 104 + #define SWP_CFG_VPM_SHIFT 7 105 + #define SWP_CFG_CPM_SHIFT 6 119 106 #define SWP_CFG_SD_SHIFT 5 120 107 #define SWP_CFG_SP_SHIFT 4 121 108 #define SWP_CFG_SE_SHIFT 3 ··· 
144 125 ep << SWP_CFG_EP_SHIFT); 145 126 } 146 127 128 + #define QMAN_RT_MODE 0x00000100 129 + 147 130 /** 148 131 * qbman_swp_init() - Create a functional object representing the given 149 132 * QBMan portal descriptor. ··· 167 146 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT; 168 147 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT; 169 148 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT; 149 + if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) 150 + p->mr.valid_bit = QB_VALID_BIT; 170 151 171 152 atomic_set(&p->vdq.available, 1); 172 153 p->vdq.valid_bit = QB_VALID_BIT; ··· 186 163 p->addr_cena = d->cena_bar; 187 164 p->addr_cinh = d->cinh_bar; 188 165 166 + if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) 167 + memset(p->addr_cena, 0, 64 * 1024); 168 + 189 169 reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, 190 170 1, /* Writes Non-cacheable */ 191 171 0, /* EQCR_CI stashing threshold */ ··· 201 175 1, /* dequeue stashing priority == TRUE */ 202 176 0, /* dequeue stashing enable == FALSE */ 203 177 0); /* EQCR_CI stashing priority == FALSE */ 178 + if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) 179 + reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */ 180 + 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */ 181 + 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */ 204 182 205 183 qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg); 206 184 reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); ··· 214 184 return NULL; 215 185 } 216 186 187 + if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) { 188 + qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE); 189 + qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE); 190 + } 217 191 /* 218 192 * SDQCR needs to be initialized to 0 when no channels are 219 193 * being dequeued from or else the QMan HW will indicate an ··· 312 278 */ 313 279 void *qbman_swp_mc_start(struct qbman_swp *p) 314 280 { 315 - return qbman_get_cmd(p, 
QBMAN_CENA_SWP_CR); 281 + if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) 282 + return qbman_get_cmd(p, QBMAN_CENA_SWP_CR); 283 + else 284 + return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM); 316 285 } 317 286 318 287 /* ··· 326 289 { 327 290 u8 *v = cmd; 328 291 329 - dma_wmb(); 330 - *v = cmd_verb | p->mc.valid_bit; 292 + if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 293 + dma_wmb(); 294 + *v = cmd_verb | p->mc.valid_bit; 295 + } else { 296 + *v = cmd_verb | p->mc.valid_bit; 297 + dma_wmb(); 298 + qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE); 299 + } 331 300 } 332 301 333 302 /* ··· 344 301 { 345 302 u32 *ret, verb; 346 303 347 - ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); 304 + if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 305 + ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); 306 + /* Remove the valid-bit - command completed if the rest 307 + * is non-zero. 308 + */ 309 + verb = ret[0] & ~QB_VALID_BIT; 310 + if (!verb) 311 + return NULL; 312 + p->mc.valid_bit ^= QB_VALID_BIT; 313 + } else { 314 + ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM); 315 + /* Command completed if the valid bit is toggled */ 316 + if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT)) 317 + return NULL; 318 + /* Command completed if the rest is non-zero */ 319 + verb = ret[0] & ~QB_VALID_BIT; 320 + if (!verb) 321 + return NULL; 322 + p->mr.valid_bit ^= QB_VALID_BIT; 323 + } 348 324 349 - /* Remove the valid-bit - command completed if the rest is non-zero */ 350 - verb = ret[0] & ~QB_VALID_BIT; 351 - if (!verb) 352 - return NULL; 353 - p->mc.valid_bit ^= QB_VALID_BIT; 354 325 return ret; 355 326 } 356 327 ··· 441 384 #define EQAR_VB(eqar) ((eqar) & 0x80) 442 385 #define EQAR_SUCCESS(eqar) ((eqar) & 0x100) 443 386 387 + static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p, 388 + u8 idx) 389 + { 390 + if (idx < 16) 391 + qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4, 392 + 
QMAN_RT_MODE); 393 + else 394 + qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 + 395 + (idx - 16) * 4, 396 + QMAN_RT_MODE); 397 + } 398 + 444 399 /** 445 400 * qbman_swp_enqueue() - Issue an enqueue command 446 401 * @s: the software portal used for enqueue ··· 477 408 memcpy(&p->dca, &d->dca, 31); 478 409 memcpy(&p->fd, fd, sizeof(*fd)); 479 410 480 - /* Set the verb byte, have to substitute in the valid-bit */ 481 - dma_wmb(); 482 - p->verb = d->verb | EQAR_VB(eqar); 411 + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 412 + /* Set the verb byte, have to substitute in the valid-bit */ 413 + dma_wmb(); 414 + p->verb = d->verb | EQAR_VB(eqar); 415 + } else { 416 + p->verb = d->verb | EQAR_VB(eqar); 417 + dma_wmb(); 418 + qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar)); 419 + } 483 420 484 421 return 0; 485 422 } ··· 662 587 return -EBUSY; 663 588 } 664 589 s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; 665 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR); 590 + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) 591 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR); 592 + else 593 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM); 666 594 p->numf = d->numf; 667 595 p->tok = QMAN_DQ_TOKEN_VALID; 668 596 p->dq_src = d->dq_src; 669 597 p->rsp_addr = d->rsp_addr; 670 598 p->rsp_addr_virt = d->rsp_addr_virt; 671 - dma_wmb(); 672 599 673 - /* Set the verb byte, have to substitute in the valid-bit */ 674 - p->verb = d->verb | s->vdq.valid_bit; 675 - s->vdq.valid_bit ^= QB_VALID_BIT; 600 + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 601 + dma_wmb(); 602 + /* Set the verb byte, have to substitute in the valid-bit */ 603 + p->verb = d->verb | s->vdq.valid_bit; 604 + s->vdq.valid_bit ^= QB_VALID_BIT; 605 + } else { 606 + p->verb = d->verb | s->vdq.valid_bit; 607 + s->vdq.valid_bit ^= QB_VALID_BIT; 608 + dma_wmb(); 609 + qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE); 610 + } 676 611 677 612 return 0; 678 613 } 
··· 740 655 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); 741 656 } 742 657 743 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); 658 + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) 659 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); 660 + else 661 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)); 744 662 verb = p->dq.verb; 745 663 746 664 /* ··· 895 807 return -EBUSY; 896 808 897 809 /* Start the release command */ 898 - p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); 810 + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) 811 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); 812 + else 813 + p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar))); 899 814 /* Copy the caller's buffer pointers to the command */ 900 815 for (i = 0; i < num_buffers; i++) 901 816 p->buf[i] = cpu_to_le64(buffers[i]); 902 817 p->bpid = d->bpid; 903 818 904 - /* 905 - * Set the verb byte, have to substitute in the valid-bit and the number 906 - * of buffers. 907 - */ 908 - dma_wmb(); 909 - p->verb = d->verb | RAR_VB(rar) | num_buffers; 819 + if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) { 820 + /* 821 + * Set the verb byte, have to substitute in the valid-bit 822 + * and the number of buffers. 823 + */ 824 + dma_wmb(); 825 + p->verb = d->verb | RAR_VB(rar) | num_buffers; 826 + } else { 827 + p->verb = d->verb | RAR_VB(rar) | num_buffers; 828 + dma_wmb(); 829 + qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT + 830 + RAR_IDX(rar) * 4, QMAN_RT_MODE); 831 + } 910 832 911 833 return 0; 912 834 }
+7 -2
drivers/soc/fsl/dpio/qbman-portal.h
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ 2 2 /* 3 3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. 4 - * Copyright 2016 NXP 4 + * Copyright 2016-2019 NXP 5 5 * 6 6 */ 7 7 #ifndef __FSL_QBMAN_PORTAL_H ··· 109 109 struct { 110 110 u32 valid_bit; /* 0x00 or 0x80 */ 111 111 } mc; 112 + 113 + /* Management response */ 114 + struct { 115 + u32 valid_bit; /* 0x00 or 0x80 */ 116 + } mr; 112 117 113 118 /* Push dequeues */ 114 119 u32 sdq; ··· 433 428 static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, 434 429 u8 cmd_verb) 435 430 { 436 - int loopvar = 1000; 431 + int loopvar = 2000; 437 432 438 433 qbman_swp_mc_submit(swp, cmd, cmd_verb); 439 434
+6
drivers/soc/fsl/guts.c
··· 97 97 .svr = 0x87000000, 98 98 .mask = 0xfff70000, 99 99 }, 100 + /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */ 101 + { .die = "LX2160A", 102 + .svr = 0x87360000, 103 + .mask = 0xff3f0000, 104 + }, 100 105 { }, 101 106 }; 102 107 ··· 223 218 { .compatible = "fsl,ls1088a-dcfg", }, 224 219 { .compatible = "fsl,ls1012a-dcfg", }, 225 220 { .compatible = "fsl,ls1046a-dcfg", }, 221 + { .compatible = "fsl,lx2160a-dcfg", }, 226 222 {} 227 223 }; 228 224 MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
+16 -4
drivers/soc/fsl/qbman/bman_portal.c
··· 32 32 33 33 static struct bman_portal *affine_bportals[NR_CPUS]; 34 34 static struct cpumask portal_cpus; 35 + static int __bman_portals_probed; 35 36 /* protect bman global registers and global data shared among portals */ 36 37 static DEFINE_SPINLOCK(bman_lock); 37 38 ··· 88 87 return 0; 89 88 } 90 89 90 + int bman_portals_probed(void) 91 + { 92 + return __bman_portals_probed; 93 + } 94 + EXPORT_SYMBOL_GPL(bman_portals_probed); 95 + 91 96 static int bman_portal_probe(struct platform_device *pdev) 92 97 { 93 98 struct device *dev = &pdev->dev; ··· 111 104 } 112 105 113 106 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 114 - if (!pcfg) 107 + if (!pcfg) { 108 + __bman_portals_probed = -1; 115 109 return -ENOMEM; 110 + } 116 111 117 112 pcfg->dev = dev; 118 113 ··· 122 113 DPAA_PORTAL_CE); 123 114 if (!addr_phys[0]) { 124 115 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); 125 - return -ENXIO; 116 + goto err_ioremap1; 126 117 } 127 118 128 119 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 129 120 DPAA_PORTAL_CI); 130 121 if (!addr_phys[1]) { 131 122 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); 132 - return -ENXIO; 123 + goto err_ioremap1; 133 124 } 134 125 135 126 pcfg->cpu = -1; ··· 137 128 irq = platform_get_irq(pdev, 0); 138 129 if (irq <= 0) { 139 130 dev_err(dev, "Can't get %pOF IRQ'\n", node); 140 - return -ENXIO; 131 + goto err_ioremap1; 141 132 } 142 133 pcfg->irq = irq; 143 134 ··· 159 150 spin_lock(&bman_lock); 160 151 cpu = cpumask_next_zero(-1, &portal_cpus); 161 152 if (cpu >= nr_cpu_ids) { 153 + __bman_portals_probed = 1; 162 154 /* unassigned portal, skip init */ 163 155 spin_unlock(&bman_lock); 164 156 return 0; ··· 185 175 err_ioremap2: 186 176 memunmap(pcfg->addr_virt_ce); 187 177 err_ioremap1: 178 + __bman_portals_probed = -1; 179 + 188 180 return -ENXIO; 189 181 } 190 182
+1 -1
drivers/soc/fsl/qbman/qman_ccsr.c
··· 596 596 } 597 597 598 598 #define LIO_CFG_LIODN_MASK 0x0fff0000 599 - void qman_liodn_fixup(u16 channel) 599 + void __qman_liodn_fixup(u16 channel) 600 600 { 601 601 static int done; 602 602 static u32 liodn_offset;
+17 -4
drivers/soc/fsl/qbman/qman_portal.c
··· 38 38 #define CONFIG_FSL_DPA_PIRQ_FAST 1 39 39 40 40 static struct cpumask portal_cpus; 41 + static int __qman_portals_probed; 41 42 /* protect qman global registers and global data shared among portals */ 42 43 static DEFINE_SPINLOCK(qman_lock); 43 44 ··· 221 220 return 0; 222 221 } 223 222 223 + int qman_portals_probed(void) 224 + { 225 + return __qman_portals_probed; 226 + } 227 + EXPORT_SYMBOL_GPL(qman_portals_probed); 228 + 224 229 static int qman_portal_probe(struct platform_device *pdev) 225 230 { 226 231 struct device *dev = &pdev->dev; ··· 245 238 } 246 239 247 240 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 248 - if (!pcfg) 241 + if (!pcfg) { 242 + __qman_portals_probed = -1; 249 243 return -ENOMEM; 244 + } 250 245 251 246 pcfg->dev = dev; 252 247 ··· 256 247 DPAA_PORTAL_CE); 257 248 if (!addr_phys[0]) { 258 249 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); 259 - return -ENXIO; 250 + goto err_ioremap1; 260 251 } 261 252 262 253 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 263 254 DPAA_PORTAL_CI); 264 255 if (!addr_phys[1]) { 265 256 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); 266 - return -ENXIO; 257 + goto err_ioremap1; 267 258 } 268 259 269 260 err = of_property_read_u32(node, "cell-index", &val); 270 261 if (err) { 271 262 dev_err(dev, "Can't get %pOF property 'cell-index'\n", node); 263 + __qman_portals_probed = -1; 272 264 return err; 273 265 } 274 266 pcfg->channel = val; ··· 277 267 irq = platform_get_irq(pdev, 0); 278 268 if (irq <= 0) { 279 269 dev_err(dev, "Can't get %pOF IRQ\n", node); 280 - return -ENXIO; 270 + goto err_ioremap1; 281 271 } 282 272 pcfg->irq = irq; 283 273 ··· 301 291 spin_lock(&qman_lock); 302 292 cpu = cpumask_next_zero(-1, &portal_cpus); 303 293 if (cpu >= nr_cpu_ids) { 294 + __qman_portals_probed = 1; 304 295 /* unassigned portal, skip init */ 305 296 spin_unlock(&qman_lock); 306 297 return 0; ··· 332 321 err_ioremap2: 333 322 memunmap(pcfg->addr_virt_ce); 334 323 
err_ioremap1: 324 + __qman_portals_probed = -1; 325 + 335 326 return -ENXIO; 336 327 } 337 328
+8 -1
drivers/soc/fsl/qbman/qman_priv.h
··· 193 193 u32 qm_get_pools_sdqcr(void); 194 194 195 195 int qman_wq_alloc(void); 196 - void qman_liodn_fixup(u16 channel); 196 + #ifdef CONFIG_FSL_PAMU 197 + #define qman_liodn_fixup __qman_liodn_fixup 198 + #else 199 + static inline void qman_liodn_fixup(u16 channel) 200 + { 201 + } 202 + #endif 203 + void __qman_liodn_fixup(u16 channel); 197 204 void qman_set_sdest(u16 channel, unsigned int cpu_idx); 198 205 199 206 struct qman_portal *qman_create_affine_portal(
+8
include/soc/fsl/bman.h
··· 133 133 * failed to probe or 0 if the bman driver did not probed yet. 134 134 */ 135 135 int bman_is_probed(void); 136 + /** 137 + * bman_portals_probed - Check if all cpu bound bman portals are probed 138 + * 139 + * Returns 1 if all the required cpu bound bman portals successfully probed, 140 + * -1 if probe errors appeared or 0 if the bman portals did not yet finished 141 + * probing. 142 + */ 143 + int bman_portals_probed(void); 136 144 137 145 #endif /* __FSL_BMAN_H */
+9
include/soc/fsl/qman.h
··· 1195 1195 int qman_is_probed(void); 1196 1196 1197 1197 /** 1198 + * qman_portals_probed - Check if all cpu bound qman portals are probed 1199 + * 1200 + * Returns 1 if all the required cpu bound qman portals successfully probed, 1201 + * -1 if probe errors appeared or 0 if the qman portals did not yet finished 1202 + * probing. 1203 + */ 1204 + int qman_portals_probed(void); 1205 + 1206 + /** 1198 1207 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold 1199 1208 * @portal: portal to get the value for 1200 1209 * @ithresh: threshold pointer