Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

soc/fsl: Introduce DPAA 1.x BMan device driver

This driver enables the Freescale DPAA 1.x Buffer Manager block.
BMan is a hardware accelerator that manages buffer pools. It allows
CPUs and other accelerators connected to the SoC datapath to acquire
and release buffers during data processing.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Signed-off-by: Scott Wood <oss@buserror.net>

authored by

Claudiu Manoil and committed by
Scott Wood
1f9c0a77 36eb1542

+1621
+28
drivers/soc/fsl/qbman/Kconfig
# Kconfig for the Freescale DPAA 1.x QBMan (BMan/QMan) drivers
menuconfig FSL_DPAA
	bool "Freescale DPAA 1.x support"
	depends on FSL_SOC_BOOKE
	select GENERIC_ALLOCATOR
	help
	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
	  hardware components on specific QorIQ multicore processors.
	  This architecture provides the infrastructure to support simplified
	  sharing of networking interfaces and accelerators by multiple CPUs.
	  The major h/w blocks composing DPAA are BMan and QMan.

	  The Buffer Manager (BMan) is a hardware buffer pool management block
	  that allows software and accelerators on the datapath to acquire and
	  release buffers in order to build frames.

	  The Queue Manager (QMan) is a hardware queue management block
	  that allows software and accelerators on the datapath to enqueue and
	  dequeue frames in order to communicate.

if FSL_DPAA

config FSL_DPAA_CHECKING
	bool "Additional driver checking"
	help
	  Compiles in additional checks, to sanity-check the drivers and
	  any use of the exported API. Not recommended for performance.

endif # FSL_DPAA
+2
drivers/soc/fsl/qbman/Makefile
# BMan driver objects; built in (bool Kconfig) when FSL_DPAA is enabled
obj-$(CONFIG_FSL_DPAA)		+= bman_ccsr.o bman_portal.o \
				   bman.o
+797
drivers/soc/fsl/qbman/bman.c
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

#define IRQNAME		"BMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */

/* Portal register assists */

/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x0000
#define BM_REG_RCR_CI_CINH	0x0004
#define BM_REG_RCR_ITR		0x0008
#define BM_REG_CFG		0x0100
#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
#define BM_REG_ISR		0x0e00
#define BM_REG_IER		0x0e04
#define BM_REG_ISDR		0x0e08
#define BM_REG_IIR		0x0e0c

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100

/*
 * Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};
enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};


/* --- Portal structures --- */

/* Number of entries in the Release Command Ring (a power of 2) */
#define BM_RCR_SIZE	8

/* Release Command; one 64-byte hardware ring entry */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
#define BM_RCR_VERB_VBIT		0x80
#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */

/* Software shadow state for the Release Command Ring */
struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};

/* MC (Management Command) command */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT		0x80
#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE		0x10
#define BM_MCC_VERB_CMD_QUERY		0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT		0x80
#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
#define BM_MCR_VERB_CMD_ERR_ECC		0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT			10000 /* us */

/* Software shadow state for the Management Command interface */
struct bm_mc {
	struct bm_mc_command *cr;
	union bm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};

/* The two mapped views of one portal's register space */
struct bm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ci + offset);
}

static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
	__raw_writel(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ce + offset);
}

struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME];
};

/* Set of CPUs that currently own an initialised affine portal */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);

/* Pins the caller to its CPU and returns that CPU's portal; pair with
 * put_affine_portal().
 */
static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}

/*
 * This object type refers to a pool, it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, eg. if different users of the
 * pool are operating via different portals.
 */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;
};

static u32 poll_portal_slow(struct bman_portal *p, u32 is);

/* Portal interrupt handler: services the enabled sources and
 * write-to-clears everything that was handled.
 */
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct bman_portal *p = ptr;
	struct bm_portal *portal = &p->p;
	u32 clear = p->irq_sources;
	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	clear |= poll_portal_slow(p, is);
	bm_out(portal, BM_REG_ISR, clear);
	return IRQ_HANDLED;
}

/* --- RCR API --- */

#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~RCR_CARRY;

	return (struct bm_rcr_entry *)addr;
}

#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void rcr_inc(struct bm_rcr *rcr)
{
	/* increment to the next RCR pointer and handle overflow and 'vbit' */
	struct bm_rcr_entry *partial = rcr->cursor + 1;

	rcr->cursor = rcr_carryclear(partial);
	if (partial != rcr->cursor)
		rcr->vbit ^= BM_RCR_VERB_VBIT;
}

/* Number of RCR entries software may still produce into */
static int bm_rcr_get_avail(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return rcr->available;
}

/* Number of committed RCR entries not yet consumed by hardware */
static int bm_rcr_get_fill(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return BM_RCR_SIZE - 1 - rcr->available;
}

/* Set the RCR interrupt threshold (mirrored in rcr->ithresh) */
static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
	struct bm_rcr *rcr = &portal->rcr;

	rcr->ithresh = ithresh;
	bm_out(portal, BM_REG_RCR_ITR, ithresh);
}

/* Speculatively warm the cache-enabled CI register before a real read */
static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}

/* Re-read the hardware consumer index and credit back freed entries;
 * returns how many entries hardware consumed since the last update.
 */
static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}

/* Begin writing a release entry; returns NULL if the ring is full.
 * Must be followed by bm_rcr_pvb_commit().
 */
static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}

/* Publish the entry obtained from bm_rcr_start(); the verb byte (with the
 * valid-bit) is written last, after dma_wmb(), so hardware never sees a
 * half-written entry.
 */
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
	dma_wmb();
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor);
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}

+ static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, 358 + enum bm_rcr_cmode cmode) 359 + { 360 + struct bm_rcr *rcr = &portal->rcr; 361 + u32 cfg; 362 + u8 pi; 363 + 364 + rcr->ring = portal->addr.ce + BM_CL_RCR; 365 + rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); 366 + pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); 367 + rcr->cursor = rcr->ring + pi; 368 + rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ? 369 + BM_RCR_VERB_VBIT : 0; 370 + rcr->available = BM_RCR_SIZE - 1 371 + - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi); 372 + rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR); 373 + #ifdef CONFIG_FSL_DPAA_CHECKING 374 + rcr->busy = 0; 375 + rcr->pmode = pmode; 376 + rcr->cmode = cmode; 377 + #endif 378 + cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0) 379 + | (pmode & 0x3); /* BCSP_CFG::RPM */ 380 + bm_out(portal, BM_REG_CFG, cfg); 381 + return 0; 382 + } 383 + 384 + static void bm_rcr_finish(struct bm_portal *portal) 385 + { 386 + #ifdef CONFIG_FSL_DPAA_CHECKING 387 + struct bm_rcr *rcr = &portal->rcr; 388 + int i; 389 + 390 + DPAA_ASSERT(!rcr->busy); 391 + 392 + i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); 393 + if (i != rcr_ptr2idx(rcr->cursor)) 394 + pr_crit("losing uncommited RCR entries\n"); 395 + 396 + i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); 397 + if (i != rcr->ci) 398 + pr_crit("missing existing RCR completions\n"); 399 + if (rcr->ci != rcr_ptr2idx(rcr->cursor)) 400 + pr_crit("RCR destroyed unquiesced\n"); 401 + #endif 402 + } 403 + 404 + /* --- Management command API --- */ 405 + static int bm_mc_init(struct bm_portal *portal) 406 + { 407 + struct bm_mc *mc = &portal->mc; 408 + 409 + mc->cr = portal->addr.ce + BM_CL_CR; 410 + mc->rr = portal->addr.ce + BM_CL_RR0; 411 + mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ? 412 + 0 : 1; 413 + mc->vbit = mc->rridx ? 
BM_MCC_VERB_VBIT : 0; 414 + #ifdef CONFIG_FSL_DPAA_CHECKING 415 + mc->state = mc_idle; 416 + #endif 417 + return 0; 418 + } 419 + 420 + static void bm_mc_finish(struct bm_portal *portal) 421 + { 422 + #ifdef CONFIG_FSL_DPAA_CHECKING 423 + struct bm_mc *mc = &portal->mc; 424 + 425 + DPAA_ASSERT(mc->state == mc_idle); 426 + if (mc->state != mc_idle) 427 + pr_crit("Losing incomplete MC command\n"); 428 + #endif 429 + } 430 + 431 + static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal) 432 + { 433 + struct bm_mc *mc = &portal->mc; 434 + 435 + DPAA_ASSERT(mc->state == mc_idle); 436 + #ifdef CONFIG_FSL_DPAA_CHECKING 437 + mc->state = mc_user; 438 + #endif 439 + dpaa_zero(mc->cr); 440 + return mc->cr; 441 + } 442 + 443 + static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb) 444 + { 445 + struct bm_mc *mc = &portal->mc; 446 + union bm_mc_result *rr = mc->rr + mc->rridx; 447 + 448 + DPAA_ASSERT(mc->state == mc_user); 449 + dma_wmb(); 450 + mc->cr->_ncw_verb = myverb | mc->vbit; 451 + dpaa_flush(mc->cr); 452 + dpaa_invalidate_touch_ro(rr); 453 + #ifdef CONFIG_FSL_DPAA_CHECKING 454 + mc->state = mc_hw; 455 + #endif 456 + } 457 + 458 + static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal) 459 + { 460 + struct bm_mc *mc = &portal->mc; 461 + union bm_mc_result *rr = mc->rr + mc->rridx; 462 + 463 + DPAA_ASSERT(mc->state == mc_hw); 464 + /* 465 + * The inactive response register's verb byte always returns zero until 466 + * its command is submitted and completed. This includes the valid-bit, 467 + * in case you were wondering... 
468 + */ 469 + if (!__raw_readb(&rr->verb)) { 470 + dpaa_invalidate_touch_ro(rr); 471 + return NULL; 472 + } 473 + mc->rridx ^= 1; 474 + mc->vbit ^= BM_MCC_VERB_VBIT; 475 + #ifdef CONFIG_FSL_DPAA_CHECKING 476 + mc->state = mc_idle; 477 + #endif 478 + return rr; 479 + } 480 + 481 + static inline int bm_mc_result_timeout(struct bm_portal *portal, 482 + union bm_mc_result **mcr) 483 + { 484 + int timeout = BM_MCR_TIMEOUT; 485 + 486 + do { 487 + *mcr = bm_mc_result(portal); 488 + if (*mcr) 489 + break; 490 + udelay(1); 491 + } while (--timeout); 492 + 493 + return timeout; 494 + } 495 + 496 + /* Disable all BSCN interrupts for the portal */ 497 + static void bm_isr_bscn_disable(struct bm_portal *portal) 498 + { 499 + bm_out(portal, BM_REG_SCN(0), 0); 500 + bm_out(portal, BM_REG_SCN(1), 0); 501 + } 502 + 503 + static int bman_create_portal(struct bman_portal *portal, 504 + const struct bm_portal_config *c) 505 + { 506 + struct bm_portal *p; 507 + int ret; 508 + 509 + p = &portal->p; 510 + /* 511 + * prep the low-level portal struct with the mapped addresses from the 512 + * config, everything that follows depends on it and "config" is more 513 + * for (de)reference... 514 + */ 515 + p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; 516 + p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; 517 + if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) { 518 + dev_err(c->dev, "RCR initialisation failed\n"); 519 + goto fail_rcr; 520 + } 521 + if (bm_mc_init(p)) { 522 + dev_err(c->dev, "MC initialisation failed\n"); 523 + goto fail_mc; 524 + } 525 + /* 526 + * Default to all BPIDs disabled, we enable as required at 527 + * run-time. 
528 + */ 529 + bm_isr_bscn_disable(p); 530 + 531 + /* Write-to-clear any stale interrupt status bits */ 532 + bm_out(p, BM_REG_ISDR, 0xffffffff); 533 + portal->irq_sources = 0; 534 + bm_out(p, BM_REG_IER, 0); 535 + bm_out(p, BM_REG_ISR, 0xffffffff); 536 + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); 537 + if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { 538 + dev_err(c->dev, "request_irq() failed\n"); 539 + goto fail_irq; 540 + } 541 + if (c->cpu != -1 && irq_can_set_affinity(c->irq) && 542 + irq_set_affinity(c->irq, cpumask_of(c->cpu))) { 543 + dev_err(c->dev, "irq_set_affinity() failed\n"); 544 + goto fail_affinity; 545 + } 546 + 547 + /* Need RCR to be empty before continuing */ 548 + ret = bm_rcr_get_fill(p); 549 + if (ret) { 550 + dev_err(c->dev, "RCR unclean\n"); 551 + goto fail_rcr_empty; 552 + } 553 + /* Success */ 554 + portal->config = c; 555 + 556 + bm_out(p, BM_REG_ISDR, 0); 557 + bm_out(p, BM_REG_IIR, 0); 558 + 559 + return 0; 560 + 561 + fail_rcr_empty: 562 + fail_affinity: 563 + free_irq(c->irq, portal); 564 + fail_irq: 565 + bm_mc_finish(p); 566 + fail_mc: 567 + bm_rcr_finish(p); 568 + fail_rcr: 569 + return -EIO; 570 + } 571 + 572 + struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c) 573 + { 574 + struct bman_portal *portal; 575 + int err; 576 + 577 + portal = &per_cpu(bman_affine_portal, c->cpu); 578 + err = bman_create_portal(portal, c); 579 + if (err) 580 + return NULL; 581 + 582 + spin_lock(&affine_mask_lock); 583 + cpumask_set_cpu(c->cpu, &affine_mask); 584 + spin_unlock(&affine_mask_lock); 585 + 586 + return portal; 587 + } 588 + 589 + static u32 poll_portal_slow(struct bman_portal *p, u32 is) 590 + { 591 + u32 ret = is; 592 + 593 + if (is & BM_PIRQ_RCRI) { 594 + bm_rcr_cce_update(&p->p); 595 + bm_rcr_set_ithresh(&p->p, 0); 596 + bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI); 597 + is &= ~BM_PIRQ_RCRI; 598 + } 599 + 600 + /* There should be no status register bits left undefined */ 601 + 
DPAA_ASSERT(!is); 602 + return ret; 603 + } 604 + 605 + int bman_p_irqsource_add(struct bman_portal *p, u32 bits) 606 + { 607 + unsigned long irqflags; 608 + 609 + local_irq_save(irqflags); 610 + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); 611 + bm_out(&p->p, BM_REG_IER, p->irq_sources); 612 + local_irq_restore(irqflags); 613 + return 0; 614 + } 615 + 616 + static int bm_shutdown_pool(u32 bpid) 617 + { 618 + struct bm_mc_command *bm_cmd; 619 + union bm_mc_result *bm_res; 620 + 621 + while (1) { 622 + struct bman_portal *p = get_affine_portal(); 623 + /* Acquire buffers until empty */ 624 + bm_cmd = bm_mc_start(&p->p); 625 + bm_cmd->bpid = bpid; 626 + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1); 627 + if (!bm_mc_result_timeout(&p->p, &bm_res)) { 628 + put_affine_portal(); 629 + pr_crit("BMan Acquire Command timedout\n"); 630 + return -ETIMEDOUT; 631 + } 632 + if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { 633 + put_affine_portal(); 634 + /* Pool is empty */ 635 + return 0; 636 + } 637 + put_affine_portal(); 638 + } 639 + 640 + return 0; 641 + } 642 + 643 + struct gen_pool *bm_bpalloc; 644 + 645 + static int bm_alloc_bpid_range(u32 *result, u32 count) 646 + { 647 + unsigned long addr; 648 + 649 + addr = gen_pool_alloc(bm_bpalloc, count); 650 + if (!addr) 651 + return -ENOMEM; 652 + 653 + *result = addr & ~DPAA_GENALLOC_OFF; 654 + 655 + return 0; 656 + } 657 + 658 + static int bm_release_bpid(u32 bpid) 659 + { 660 + int ret; 661 + 662 + ret = bm_shutdown_pool(bpid); 663 + if (ret) { 664 + pr_debug("BPID %d leaked\n", bpid); 665 + return ret; 666 + } 667 + 668 + gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1); 669 + return 0; 670 + } 671 + 672 + struct bman_pool *bman_new_pool(void) 673 + { 674 + struct bman_pool *pool = NULL; 675 + u32 bpid; 676 + 677 + if (bm_alloc_bpid_range(&bpid, 1)) 678 + return NULL; 679 + 680 + pool = kmalloc(sizeof(*pool), GFP_KERNEL); 681 + if (!pool) 682 + goto err; 683 + 684 + pool->bpid = bpid; 685 + 686 + return 
pool; 687 + err: 688 + bm_release_bpid(bpid); 689 + kfree(pool); 690 + return NULL; 691 + } 692 + EXPORT_SYMBOL(bman_new_pool); 693 + 694 + void bman_free_pool(struct bman_pool *pool) 695 + { 696 + bm_release_bpid(pool->bpid); 697 + 698 + kfree(pool); 699 + } 700 + EXPORT_SYMBOL(bman_free_pool); 701 + 702 + int bman_get_bpid(const struct bman_pool *pool) 703 + { 704 + return pool->bpid; 705 + } 706 + EXPORT_SYMBOL(bman_get_bpid); 707 + 708 + static void update_rcr_ci(struct bman_portal *p, int avail) 709 + { 710 + if (avail) 711 + bm_rcr_cce_prefetch(&p->p); 712 + else 713 + bm_rcr_cce_update(&p->p); 714 + } 715 + 716 + int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num) 717 + { 718 + struct bman_portal *p; 719 + struct bm_rcr_entry *r; 720 + unsigned long irqflags; 721 + int avail, timeout = 1000; /* 1ms */ 722 + int i = num - 1; 723 + 724 + DPAA_ASSERT(num > 0 && num <= 8); 725 + 726 + do { 727 + p = get_affine_portal(); 728 + local_irq_save(irqflags); 729 + avail = bm_rcr_get_avail(&p->p); 730 + if (avail < 2) 731 + update_rcr_ci(p, avail); 732 + r = bm_rcr_start(&p->p); 733 + local_irq_restore(irqflags); 734 + put_affine_portal(); 735 + if (likely(r)) 736 + break; 737 + 738 + udelay(1); 739 + } while (--timeout); 740 + 741 + if (unlikely(!timeout)) 742 + return -ETIMEDOUT; 743 + 744 + p = get_affine_portal(); 745 + local_irq_save(irqflags); 746 + /* 747 + * we can copy all but the first entry, as this can trigger badness 748 + * with the valid-bit 749 + */ 750 + bm_buffer_set64(r->bufs, bm_buffer_get64(bufs)); 751 + bm_buffer_set_bpid(r->bufs, pool->bpid); 752 + if (i) 753 + memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0])); 754 + 755 + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | 756 + (num & BM_RCR_VERB_BUFCOUNT_MASK)); 757 + 758 + local_irq_restore(irqflags); 759 + put_affine_portal(); 760 + return 0; 761 + } 762 + EXPORT_SYMBOL(bman_release); 763 + 764 + int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, 
u8 num) 765 + { 766 + struct bman_portal *p = get_affine_portal(); 767 + struct bm_mc_command *mcc; 768 + union bm_mc_result *mcr; 769 + int ret; 770 + 771 + DPAA_ASSERT(num > 0 && num <= 8); 772 + 773 + mcc = bm_mc_start(&p->p); 774 + mcc->bpid = pool->bpid; 775 + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 776 + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); 777 + if (!bm_mc_result_timeout(&p->p, &mcr)) { 778 + put_affine_portal(); 779 + pr_crit("BMan Acquire Timeout\n"); 780 + return -ETIMEDOUT; 781 + } 782 + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; 783 + if (bufs) 784 + memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0])); 785 + 786 + put_affine_portal(); 787 + if (ret != num) 788 + ret = -ENOMEM; 789 + return ret; 790 + } 791 + EXPORT_SYMBOL(bman_acquire); 792 + 793 + const struct bm_portal_config * 794 + bman_get_bm_portal_config(const struct bman_portal *portal) 795 + { 796 + return portal->config; 797 + }
+263
drivers/soc/fsl/qbman/bman_ccsr.c
··· 1 + /* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc. 2 + * 3 + * Redistribution and use in source and binary forms, with or without 4 + * modification, are permitted provided that the following conditions are met: 5 + * * Redistributions of source code must retain the above copyright 6 + * notice, this list of conditions and the following disclaimer. 7 + * * Redistributions in binary form must reproduce the above copyright 8 + * notice, this list of conditions and the following disclaimer in the 9 + * documentation and/or other materials provided with the distribution. 10 + * * Neither the name of Freescale Semiconductor nor the 11 + * names of its contributors may be used to endorse or promote products 12 + * derived from this software without specific prior written permission. 13 + * 14 + * ALTERNATIVELY, this software may be distributed under the terms of the 15 + * GNU General Public License ("GPL") as published by the Free Software 16 + * Foundation, either version 2 of that License or (at your option) any 17 + * later version. 18 + * 19 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 20 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 23 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 + */ 30 + 31 + #include "bman_priv.h" 32 + 33 + u16 bman_ip_rev; 34 + EXPORT_SYMBOL(bman_ip_rev); 35 + 36 + /* Register offsets */ 37 + #define REG_FBPR_FPC 0x0800 38 + #define REG_ECSR 0x0a00 39 + #define REG_ECIR 0x0a04 40 + #define REG_EADR 0x0a08 41 + #define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) 42 + #define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) 43 + #define REG_IP_REV_1 0x0bf8 44 + #define REG_IP_REV_2 0x0bfc 45 + #define REG_FBPR_BARE 0x0c00 46 + #define REG_FBPR_BAR 0x0c04 47 + #define REG_FBPR_AR 0x0c10 48 + #define REG_SRCIDR 0x0d04 49 + #define REG_LIODNR 0x0d08 50 + #define REG_ERR_ISR 0x0e00 51 + #define REG_ERR_IER 0x0e04 52 + #define REG_ERR_ISDR 0x0e08 53 + 54 + /* Used by all error interrupt registers except 'inhibit' */ 55 + #define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */ 56 + #define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */ 57 + #define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */ 58 + #define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */ 59 + #define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */ 60 + 61 + struct bman_hwerr_txt { 62 + u32 mask; 63 + const char *txt; 64 + }; 65 + 66 + static const struct bman_hwerr_txt bman_hwerr_txts[] = { 67 + { BM_EIRQ_IVCI, "Invalid Command Verb" }, 68 + { BM_EIRQ_FLWI, "FBPR Low Watermark" }, 69 + { BM_EIRQ_MBEI, "Multi-bit ECC Error" }, 70 + { BM_EIRQ_SBEI, "Single-bit ECC Error" }, 71 + { BM_EIRQ_BSCN, "Pool State Change Notification" }, 72 + }; 73 + 74 + /* Only trigger low water mark interrupt once only */ 75 + #define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI 76 + 77 + /* Pointer to the start of the BMan's CCSR space */ 78 + static u32 __iomem *bm_ccsr_start; 79 + 80 + static inline u32 bm_ccsr_in(u32 offset) 81 + { 82 + return ioread32be(bm_ccsr_start + offset/4); 83 + } 84 + static inline void bm_ccsr_out(u32 offset, u32 val) 85 + { 86 + iowrite32be(val, bm_ccsr_start + offset/4); 87 + } 88 + 89 + static void bm_get_version(u16 *id, u8 *major, u8 *minor) 90 + { 91 + 
u32 v = bm_ccsr_in(REG_IP_REV_1); 92 + *id = (v >> 16); 93 + *major = (v >> 8) & 0xff; 94 + *minor = v & 0xff; 95 + } 96 + 97 + /* signal transactions for FBPRs with higher priority */ 98 + #define FBPR_AR_RPRIO_HI BIT(30) 99 + 100 + static void bm_set_memory(u64 ba, u32 size) 101 + { 102 + u32 exp = ilog2(size); 103 + /* choke if size isn't within range */ 104 + DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 && 105 + is_power_of_2(size)); 106 + /* choke if '[e]ba' has lower-alignment than 'size' */ 107 + DPAA_ASSERT(!(ba & (size - 1))); 108 + bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba)); 109 + bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba)); 110 + bm_ccsr_out(REG_FBPR_AR, exp - 1); 111 + } 112 + 113 + /* 114 + * Location and size of BMan private memory 115 + * 116 + * Ideally we would use the DMA API to turn rmem->base into a DMA address 117 + * (especially if iommu translations ever get involved). Unfortunately, the 118 + * DMA API currently does not allow mapping anything that is not backed with 119 + * a struct page. 
120 + */ 121 + static dma_addr_t fbpr_a; 122 + static size_t fbpr_sz; 123 + 124 + static int bman_fbpr(struct reserved_mem *rmem) 125 + { 126 + fbpr_a = rmem->base; 127 + fbpr_sz = rmem->size; 128 + 129 + WARN_ON(!(fbpr_a && fbpr_sz)); 130 + 131 + return 0; 132 + } 133 + RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr); 134 + 135 + static irqreturn_t bman_isr(int irq, void *ptr) 136 + { 137 + u32 isr_val, ier_val, ecsr_val, isr_mask, i; 138 + struct device *dev = ptr; 139 + 140 + ier_val = bm_ccsr_in(REG_ERR_IER); 141 + isr_val = bm_ccsr_in(REG_ERR_ISR); 142 + ecsr_val = bm_ccsr_in(REG_ECSR); 143 + isr_mask = isr_val & ier_val; 144 + 145 + if (!isr_mask) 146 + return IRQ_NONE; 147 + 148 + for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) { 149 + if (bman_hwerr_txts[i].mask & isr_mask) { 150 + dev_err_ratelimited(dev, "ErrInt: %s\n", 151 + bman_hwerr_txts[i].txt); 152 + if (bman_hwerr_txts[i].mask & ecsr_val) { 153 + /* Re-arm error capture registers */ 154 + bm_ccsr_out(REG_ECSR, ecsr_val); 155 + } 156 + if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) { 157 + dev_dbg(dev, "Disabling error 0x%x\n", 158 + bman_hwerr_txts[i].mask); 159 + ier_val &= ~bman_hwerr_txts[i].mask; 160 + bm_ccsr_out(REG_ERR_IER, ier_val); 161 + } 162 + } 163 + } 164 + bm_ccsr_out(REG_ERR_ISR, isr_val); 165 + 166 + return IRQ_HANDLED; 167 + } 168 + 169 + static int fsl_bman_probe(struct platform_device *pdev) 170 + { 171 + int ret, err_irq; 172 + struct device *dev = &pdev->dev; 173 + struct device_node *node = dev->of_node; 174 + struct resource *res; 175 + u16 id, bm_pool_cnt; 176 + u8 major, minor; 177 + 178 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 179 + if (!res) { 180 + dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n", 181 + node->full_name); 182 + return -ENXIO; 183 + } 184 + bm_ccsr_start = devm_ioremap(dev, res->start, 185 + res->end - res->start + 1); 186 + if (!bm_ccsr_start) 187 + return -ENXIO; 188 + 189 + bm_get_version(&id, &major, &minor); 
190 + if (major == 1 && minor == 0) { 191 + bman_ip_rev = BMAN_REV10; 192 + bm_pool_cnt = BM_POOL_MAX; 193 + } else if (major == 2 && minor == 0) { 194 + bman_ip_rev = BMAN_REV20; 195 + bm_pool_cnt = 8; 196 + } else if (major == 2 && minor == 1) { 197 + bman_ip_rev = BMAN_REV21; 198 + bm_pool_cnt = BM_POOL_MAX; 199 + } else { 200 + dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n", 201 + id, major, minor); 202 + return -ENODEV; 203 + } 204 + 205 + bm_set_memory(fbpr_a, fbpr_sz); 206 + 207 + err_irq = platform_get_irq(pdev, 0); 208 + if (err_irq <= 0) { 209 + dev_info(dev, "Can't get %s IRQ\n", node->full_name); 210 + return -ENODEV; 211 + } 212 + ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err", 213 + dev); 214 + if (ret) { 215 + dev_err(dev, "devm_request_irq() failed %d for '%s'\n", 216 + ret, node->full_name); 217 + return ret; 218 + } 219 + /* Disable Buffer Pool State Change */ 220 + bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN); 221 + /* 222 + * Write-to-clear any stale bits, (eg. starvation being asserted prior 223 + * to resource allocation during driver init). 
224 + */ 225 + bm_ccsr_out(REG_ERR_ISR, 0xffffffff); 226 + /* Enable Error Interrupts */ 227 + bm_ccsr_out(REG_ERR_IER, 0xffffffff); 228 + 229 + bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc"); 230 + if (IS_ERR(bm_bpalloc)) { 231 + ret = PTR_ERR(bm_bpalloc); 232 + dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret); 233 + return ret; 234 + } 235 + 236 + /* seed BMan resource pool */ 237 + ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1); 238 + if (ret) { 239 + dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n", 240 + 0, bm_pool_cnt - 1, ret); 241 + return ret; 242 + } 243 + 244 + return 0; 245 + }; 246 + 247 + static const struct of_device_id fsl_bman_ids[] = { 248 + { 249 + .compatible = "fsl,bman", 250 + }, 251 + {} 252 + }; 253 + 254 + static struct platform_driver fsl_bman_driver = { 255 + .driver = { 256 + .name = KBUILD_MODNAME, 257 + .of_match_table = fsl_bman_ids, 258 + .suppress_bind_attrs = true, 259 + }, 260 + .probe = fsl_bman_probe, 261 + }; 262 + 263 + builtin_platform_driver(fsl_bman_driver);
+219
drivers/soc/fsl/qbman/bman_portal.c
··· 1 + /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 2 + * 3 + * Redistribution and use in source and binary forms, with or without 4 + * modification, are permitted provided that the following conditions are met: 5 + * * Redistributions of source code must retain the above copyright 6 + * notice, this list of conditions and the following disclaimer. 7 + * * Redistributions in binary form must reproduce the above copyright 8 + * notice, this list of conditions and the following disclaimer in the 9 + * documentation and/or other materials provided with the distribution. 10 + * * Neither the name of Freescale Semiconductor nor the 11 + * names of its contributors may be used to endorse or promote products 12 + * derived from this software without specific prior written permission. 13 + * 14 + * ALTERNATIVELY, this software may be distributed under the terms of the 15 + * GNU General Public License ("GPL") as published by the Free Software 16 + * Foundation, either version 2 of that License or (at your option) any 17 + * later version. 18 + * 19 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 20 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 23 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 + */ 30 + 31 + #include "bman_priv.h" 32 + 33 + static struct bman_portal *affine_bportals[NR_CPUS]; 34 + static struct cpumask portal_cpus; 35 + /* protect bman global registers and global data shared among portals */ 36 + static DEFINE_SPINLOCK(bman_lock); 37 + 38 + static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg) 39 + { 40 + struct bman_portal *p = bman_create_affine_portal(pcfg); 41 + 42 + if (!p) { 43 + dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n", 44 + __func__, pcfg->cpu); 45 + return NULL; 46 + } 47 + 48 + bman_p_irqsource_add(p, BM_PIRQ_RCRI); 49 + affine_bportals[pcfg->cpu] = p; 50 + 51 + dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); 52 + 53 + return p; 54 + } 55 + 56 + static void bman_offline_cpu(unsigned int cpu) 57 + { 58 + struct bman_portal *p = affine_bportals[cpu]; 59 + const struct bm_portal_config *pcfg; 60 + 61 + if (!p) 62 + return; 63 + 64 + pcfg = bman_get_bm_portal_config(p); 65 + if (!pcfg) 66 + return; 67 + 68 + irq_set_affinity(pcfg->irq, cpumask_of(0)); 69 + } 70 + 71 + static void bman_online_cpu(unsigned int cpu) 72 + { 73 + struct bman_portal *p = affine_bportals[cpu]; 74 + const struct bm_portal_config *pcfg; 75 + 76 + if (!p) 77 + return; 78 + 79 + pcfg = bman_get_bm_portal_config(p); 80 + if (!pcfg) 81 + return; 82 + 83 + irq_set_affinity(pcfg->irq, cpumask_of(cpu)); 84 + } 85 + 86 + static int bman_hotplug_cpu_callback(struct notifier_block *nfb, 87 + unsigned long action, void *hcpu) 88 + { 89 + unsigned int cpu = (unsigned long)hcpu; 90 + 91 + switch (action) { 92 + case CPU_ONLINE: 93 + case CPU_ONLINE_FROZEN: 94 + bman_online_cpu(cpu); 95 + break; 96 + case CPU_DOWN_PREPARE: 97 + case CPU_DOWN_PREPARE_FROZEN: 98 + bman_offline_cpu(cpu); 99 + } 100 + 101 + return NOTIFY_OK; 102 + } 103 + 104 + static struct notifier_block bman_hotplug_cpu_notifier = { 105 + .notifier_call = bman_hotplug_cpu_callback, 106 + }; 107 + 108 + static int bman_portal_probe(struct platform_device *pdev) 
109 + { 110 + struct device *dev = &pdev->dev; 111 + struct device_node *node = dev->of_node; 112 + struct bm_portal_config *pcfg; 113 + struct resource *addr_phys[2]; 114 + void __iomem *va; 115 + int irq, cpu; 116 + 117 + pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 118 + if (!pcfg) 119 + return -ENOMEM; 120 + 121 + pcfg->dev = dev; 122 + 123 + addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM, 124 + DPAA_PORTAL_CE); 125 + if (!addr_phys[0]) { 126 + dev_err(dev, "Can't get %s property 'reg::CE'\n", 127 + node->full_name); 128 + return -ENXIO; 129 + } 130 + 131 + addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 132 + DPAA_PORTAL_CI); 133 + if (!addr_phys[1]) { 134 + dev_err(dev, "Can't get %s property 'reg::CI'\n", 135 + node->full_name); 136 + return -ENXIO; 137 + } 138 + 139 + pcfg->cpu = -1; 140 + 141 + irq = platform_get_irq(pdev, 0); 142 + if (irq <= 0) { 143 + dev_err(dev, "Can't get %s IRQ'\n", node->full_name); 144 + return -ENXIO; 145 + } 146 + pcfg->irq = irq; 147 + 148 + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); 149 + if (!va) 150 + goto err_ioremap1; 151 + 152 + pcfg->addr_virt[DPAA_PORTAL_CE] = va; 153 + 154 + va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), 155 + _PAGE_GUARDED | _PAGE_NO_CACHE); 156 + if (!va) 157 + goto err_ioremap2; 158 + 159 + pcfg->addr_virt[DPAA_PORTAL_CI] = va; 160 + 161 + spin_lock(&bman_lock); 162 + cpu = cpumask_next_zero(-1, &portal_cpus); 163 + if (cpu >= nr_cpu_ids) { 164 + /* unassigned portal, skip init */ 165 + spin_unlock(&bman_lock); 166 + return 0; 167 + } 168 + 169 + cpumask_set_cpu(cpu, &portal_cpus); 170 + spin_unlock(&bman_lock); 171 + pcfg->cpu = cpu; 172 + 173 + if (!init_pcfg(pcfg)) 174 + goto err_ioremap2; 175 + 176 + /* clear irq affinity if assigned cpu is offline */ 177 + if (!cpu_online(cpu)) 178 + bman_offline_cpu(cpu); 179 + 180 + return 0; 181 + 182 + err_ioremap2: 183 + iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); 184 + 
err_ioremap1: 185 + dev_err(dev, "ioremap failed\n"); 186 + return -ENXIO; 187 + } 188 + 189 + static const struct of_device_id bman_portal_ids[] = { 190 + { 191 + .compatible = "fsl,bman-portal", 192 + }, 193 + {} 194 + }; 195 + MODULE_DEVICE_TABLE(of, bman_portal_ids); 196 + 197 + static struct platform_driver bman_portal_driver = { 198 + .driver = { 199 + .name = KBUILD_MODNAME, 200 + .of_match_table = bman_portal_ids, 201 + }, 202 + .probe = bman_portal_probe, 203 + }; 204 + 205 + static int __init bman_portal_driver_register(struct platform_driver *drv) 206 + { 207 + int ret; 208 + 209 + ret = platform_driver_register(drv); 210 + if (ret < 0) 211 + return ret; 212 + 213 + register_hotcpu_notifier(&bman_hotplug_cpu_notifier); 214 + 215 + return 0; 216 + } 217 + 218 + module_driver(bman_portal_driver, 219 + bman_portal_driver_register, platform_driver_unregister);
+80
drivers/soc/fsl/qbman/bman_priv.h
··· 1 + /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 2 + * 3 + * Redistribution and use in source and binary forms, with or without 4 + * modification, are permitted provided that the following conditions are met: 5 + * * Redistributions of source code must retain the above copyright 6 + * notice, this list of conditions and the following disclaimer. 7 + * * Redistributions in binary form must reproduce the above copyright 8 + * notice, this list of conditions and the following disclaimer in the 9 + * documentation and/or other materials provided with the distribution. 10 + * * Neither the name of Freescale Semiconductor nor the 11 + * names of its contributors may be used to endorse or promote products 12 + * derived from this software without specific prior written permission. 13 + * 14 + * ALTERNATIVELY, this software may be distributed under the terms of the 15 + * GNU General Public License ("GPL") as published by the Free Software 16 + * Foundation, either version 2 of that License or (at your option) any 17 + * later version. 18 + * 19 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 20 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 23 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "dpaa_sys.h"

#include <soc/fsl/bman.h>

/* Portal processing (interrupt) sources */
#define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */

/* Revision info (for errata and feature handling) */
#define BMAN_REV10 0x0100
#define BMAN_REV20 0x0200
#define BMAN_REV21 0x0201
extern u16 bman_ip_rev;	/* 0 if uninitialised, otherwise BMAN_REVx */

/* genalloc pool of buffer-pool IDs, seeded by the CCSR probe */
extern struct gen_pool *bm_bpalloc;

/* Per-portal configuration, filled in by the portal platform driver */
struct bm_portal_config {
	/*
	 * Corenet portal addresses;
	 * [0]==cache-enabled, [1]==cache-inhibited.
	 */
	void __iomem *addr_virt[2];
	/* Allow these to be joined in lists */
	struct list_head list;
	struct device *dev;
	/* User-visible portal configuration settings */
	/* portal is affined to this cpu */
	int cpu;
	/* portal interrupt line */
	int irq;
};

struct bman_portal *bman_create_affine_portal(
			const struct bm_portal_config *config);
/*
 * The below bman_p_***() variant might be called in a situation that the cpu
 * which the portal affine to is not online yet.
 * @bman_portal specifies which portal the API will use.
 */
int bman_p_irqsource_add(struct bman_portal *p, u32 bits);

/*
 * Used by all portal interrupt registers except 'inhibit'
 * This mask contains all the "irqsource" bits visible to API users
 */
#define BM_PIRQ_VISIBLE	BM_PIRQ_RCRI

const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal);
+103
drivers/soc/fsl/qbman/dpaa_sys.h
··· 1 + /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 2 + * 3 + * Redistribution and use in source and binary forms, with or without 4 + * modification, are permitted provided that the following conditions are met: 5 + * * Redistributions of source code must retain the above copyright 6 + * notice, this list of conditions and the following disclaimer. 7 + * * Redistributions in binary form must reproduce the above copyright 8 + * notice, this list of conditions and the following disclaimer in the 9 + * documentation and/or other materials provided with the distribution. 10 + * * Neither the name of Freescale Semiconductor nor the 11 + * names of its contributors may be used to endorse or promote products 12 + * derived from this software without specific prior written permission. 13 + * 14 + * ALTERNATIVELY, this software may be distributed under the terms of the 15 + * GNU General Public License ("GPL") as published by the Free Software 16 + * Foundation, either version 2 of that License or (at your option) any 17 + * later version. 18 + * 19 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 20 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 23 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 + */ 30 + 31 + #ifndef __DPAA_SYS_H 32 + #define __DPAA_SYS_H 33 + 34 + #include <linux/cpu.h> 35 + #include <linux/slab.h> 36 + #include <linux/module.h> 37 + #include <linux/interrupt.h> 38 + #include <linux/kthread.h> 39 + #include <linux/vmalloc.h> 40 + #include <linux/platform_device.h> 41 + #include <linux/of_reserved_mem.h> 42 + #include <linux/prefetch.h> 43 + #include <linux/genalloc.h> 44 + #include <asm/cacheflush.h> 45 + 46 + /* For 2-element tables related to cache-inhibited and cache-enabled mappings */ 47 + #define DPAA_PORTAL_CE 0 48 + #define DPAA_PORTAL_CI 1 49 + 50 + #if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64) 51 + #error "Unsupported Cacheline Size" 52 + #endif 53 + 54 + static inline void dpaa_flush(void *p) 55 + { 56 + #ifdef CONFIG_PPC 57 + flush_dcache_range((unsigned long)p, (unsigned long)p+64); 58 + #elif defined(CONFIG_ARM32) 59 + __cpuc_flush_dcache_area(p, 64); 60 + #elif defined(CONFIG_ARM64) 61 + __flush_dcache_area(p, 64); 62 + #endif 63 + } 64 + 65 + #define dpaa_invalidate(p) dpaa_flush(p) 66 + 67 + #define dpaa_zero(p) memset(p, 0, 64) 68 + 69 + static inline void dpaa_touch_ro(void *p) 70 + { 71 + #if (L1_CACHE_BYTES == 32) 72 + prefetch(p+32); 73 + #endif 74 + prefetch(p); 75 + } 76 + 77 + /* Commonly used combo */ 78 + static inline void dpaa_invalidate_touch_ro(void *p) 79 + { 80 + dpaa_invalidate(p); 81 + dpaa_touch_ro(p); 82 + } 83 + 84 + 85 + #ifdef CONFIG_FSL_DPAA_CHECKING 86 + #define DPAA_ASSERT(x) WARN_ON(!(x)) 87 + #else 88 + #define DPAA_ASSERT(x) 89 + #endif 90 + 91 + /* cyclic helper for rings */ 92 + static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last) 93 + { 94 + /* 'first' is included, 'last' is excluded */ 95 + if (first <= last) 96 + return last - first; 97 + return ringsize + last - first; 98 + } 99 + 100 + /* Offset applied to genalloc pools due to zero being an error return */ 101 + #define DPAA_GENALLOC_OFF 0x80000000 102 + 103 + #endif /* __DPAA_SYS_H */
+129
include/soc/fsl/bman.h
··· 1 + /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. 2 + * 3 + * Redistribution and use in source and binary forms, with or without 4 + * modification, are permitted provided that the following conditions are met: 5 + * * Redistributions of source code must retain the above copyright 6 + * notice, this list of conditions and the following disclaimer. 7 + * * Redistributions in binary form must reproduce the above copyright 8 + * notice, this list of conditions and the following disclaimer in the 9 + * documentation and/or other materials provided with the distribution. 10 + * * Neither the name of Freescale Semiconductor nor the 11 + * names of its contributors may be used to endorse or promote products 12 + * derived from this software without specific prior written permission. 13 + * 14 + * ALTERNATIVELY, this software may be distributed under the terms of the 15 + * GNU General Public License ("GPL") as published by the Free Software 16 + * Foundation, either version 2 of that License or (at your option) any 17 + * later version. 18 + * 19 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 20 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 23 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#ifndef __FSL_BMAN_H
#define __FSL_BMAN_H

/*
 * wrapper for 48-bit buffers: the hardware descriptor packs an 8-bit
 * buffer-pool ID and a 48-bit address into one big-endian 64-bit word.
 */
struct bm_buffer {
	union {
		struct {
			__be16 bpid; /* hi 8-bits reserved */
			__be16 hi; /* High 16-bits of 48-bit address */
			__be32 lo; /* Low 32-bits of 48-bit address */
		};
		__be64 data;
	};
} __aligned(8);
/*
 * Restore the 48 bit address previously stored in BMan
 * hardware pools as a dma_addr_t
 */
static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
{
	/* mask off the bpid byte and reserved bits, keep the low 48 bits */
	return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
}

/* Same 48-bit address, returned as a plain u64 */
static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
	return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
}

/* Store a 48-bit address; the bpid field is left untouched */
static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
{
	buf->hi = cpu_to_be16(upper_32_bits(addr));
	buf->lo = cpu_to_be32(lower_32_bits(addr));
}

/* Extract the 8-bit buffer-pool ID */
static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
{
	return be16_to_cpu(buf->bpid) & 0xff;
}

/* Store the 8-bit buffer-pool ID (upper 8 bits of the field are reserved) */
static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
{
	buf->bpid = cpu_to_be16(bpid & 0xff);
}

/* Managed portal, high-level i/face */

/* Portal and Buffer Pools */
struct bman_portal;
struct bman_pool;

#define BM_POOL_MAX	64 /* max # of buffer pools */

/**
 * bman_new_pool - Allocates a Buffer Pool object
 *
 * Creates a pool object, and returns a reference to it or NULL on error.
 */
struct bman_pool *bman_new_pool(void);

/**
 * bman_free_pool - Deallocates a Buffer Pool object
 * @pool: the pool object to release
 */
void bman_free_pool(struct bman_pool *pool);

/**
 * bman_get_bpid - Returns a pool object's BPID.
 * @pool: the pool object
 *
 * The returned value is the index of the encapsulated buffer pool,
 * in the range of [0, @BM_POOL_MAX-1].
 */
int bman_get_bpid(const struct bman_pool *pool);

/**
 * bman_release - Release buffer(s) to the buffer pool
 * @pool: the buffer pool object to release to
 * @bufs: an array of buffers to release
 * @num: the number of buffers in @bufs (1-8)
 *
 * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
 * the function will return -ETIMEDOUT. Otherwise, it returns zero.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);

/**
 * bman_acquire - Acquire buffer(s) from a buffer pool
 * @pool: the buffer pool object to acquire from
 * @bufs: array for storing the acquired buffers
 * @num: the number of buffers desired (@bufs is at least this big)
 *
 * Issues an "Acquire" command via the portal's management command interface.
 * The return value will be the number of buffers obtained from the pool, or a
 * negative error code if a h/w error or pool starvation was encountered. In
 * the latter case, the content of @bufs is undefined.
 */
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);

#endif	/* __FSL_BMAN_H */