
soc/qman: Add self-test for QMan driver

Add self-tests for the DPAA 1.x Queue Manager (QMan) driver. The tests
verify that the driver can correctly enqueue frames to, and dequeue
frames from, frame queues using the QMan portal infrastructure.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Signed-off-by: Scott Wood <oss@buserror.net>

Authored by Claudiu Manoil, committed by Scott Wood
commit de775623 (parent 97e0d385)
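
The heart of the API test below is a single frame queue driven through its
full lifecycle: create (parked), init, enqueue, volatile dequeue, retire,
out-of-service, destroy. As orientation before the diffs, here is a condensed
sketch of that round trip using the same driver calls the test relies on;
error paths are collapsed and the DQRR/ERN/FQS callback wiring is assumed to
be set up as in qman_test_api.c below — a sketch, not a substitute for the
full test.

/* Condensed sketch of the FQ lifecycle exercised by qman_test_api.c */
static int fq_round_trip(struct qman_fq *fq, struct qm_fd *fd)
{
        unsigned int flags;

        /* allocate a dynamic FQID; the FQ starts out parked */
        if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
                return -EIO;
        if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
                return -EIO;
        /* push a frame in, then pull until empty with a volatile dequeue */
        if (qman_enqueue(fq, fd))
                return -EIO;
        if (qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
                                  QMAN_VOLATILE_FLAG_FINISH,
                                  QM_VDQCR_NUMFRAMES_TILLEMPTY))
                return -EIO;
        /* tear down: retire, take out-of-service, then destroy */
        if (qman_retire_fq(fq, &flags) < 0)
                return -EIO;
        if (qman_oos_fq(fq))
                return -EIO;
        qman_destroy_fq(fq);
        return 0;
}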

6 files changed, 995 insertions(+)

drivers/soc/fsl/qbman/Kconfig (+23)
@@ -41,4 +41,27 @@
         high-level API testing with them (whichever portal(s) are affine
         to the cpu(s) the test executes on).
 
+config FSL_QMAN_TEST
+       tristate "QMan self-tests"
+       help
+         Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+       bool "QMan high-level self-test"
+       depends on FSL_QMAN_TEST
+       default y
+       help
+         This requires the presence of cpu-affine portals, and performs
+         high-level API testing with them (whichever portal(s) are affine to
+         the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+       bool "QMan 'hot potato' data-stashing self-test"
+       depends on FSL_QMAN_TEST
+       default y
+       help
+         This performs a "hot potato" style test enqueuing/dequeuing a frame
+         across a series of FQs scheduled to different portals (and cpus), with
+         DQRR, data and context stashing always on.
+
 endif # FSL_DPAA
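For reference, building the tests as a loadable module with both sub-tests
selected would use a fragment like the following (a hypothetical .config
excerpt; the enclosing FSL_DPAA/QBMan portal options must already be enabled):

CONFIG_FSL_QMAN_TEST=m
CONFIG_FSL_QMAN_TEST_API=y
CONFIG_FSL_QMAN_TEST_STASH=y

With =m, the Makefile rules below link qman_test.o plus whichever sub-test
objects are selected into a single qman-test.ko, and the tests run from
module_init() when the module is loaded.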
drivers/soc/fsl/qbman/Makefile (+5)
@@ -5,3 +5,8 @@
 obj-$(CONFIG_FSL_BMAN_TEST)                     += bman-test.o
 bman-test-y                                      = bman_test.o
 bman-test-$(CONFIG_FSL_BMAN_TEST_API)           += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST)                     += qman-test.o
+qman-test-y                                      = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API)           += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH)         += qman_test_stash.o
drivers/soc/fsl/qbman/qman_test.c (new file, +62)
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+        int loop = 1;
+        int err = 0;
+
+        while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+                err = qman_test_stash();
+                if (err)
+                        break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+                err = qman_test_api();
+                if (err)
+                        break;
+#endif
+        }
+        return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
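Assuming both sub-tests are enabled, test_init() runs the stash test first
and then the API test. On a successful run on a two-cpu system the console
output would look roughly like this — reconstructed from the pr_info()
strings in the sources below (with the pr_fmt() prefix from qman_test.h),
not captured from hardware:

qman_test: qman_test_stash(): Starting
qman_test: Creating 2 handlers per cpu...
qman_test: Number of cpus: 2, total of 4 handlers
qman_test: Sending first frame
qman_test: Received final (8th) frame
qman_test: qman_test_stash(): Finished
qman_test: qman_test_api(): Starting
qman_test: VDQCR (till-empty);
qman_test: VDQCR (4 of 10);
qman_test: VDQCR (6 of 10);
qman_test: scheduled dequeue (till-empty)
qman_test: Retirement message received
qman_test: qman_test_api(): Finished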
drivers/soc/fsl/qbman/qman_test.h (new file, +36)
@@ -0,0 +1,36 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * [identical dual BSD/GPL license text as qman_test.c above]
+ */
+
+#include "qman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+int qman_test_stash(void);
+int qman_test_api(void);
drivers/soc/fsl/qbman/qman_test_api.c (new file, +252)
@@ -0,0 +1,252 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * [identical dual BSD/GPL license text as qman_test.c above]
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID          27
+#define POOL_ID         2
+#define FQ_FLAGS        QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES    10
+#define NUM_PARTIAL     4
+#define PORTAL_SDQCR    (QM_SDQCR_SOURCE_CHANNELS | \
+                         QM_SDQCR_TYPE_PRIO_QOS | \
+                         QM_SDQCR_TOKEN_SET(0x98) | \
+                         QM_SDQCR_CHANNELS_DEDICATED | \
+                         QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE   ((void *)0xf00dbeef)
+#define VDQCR_FLAGS     (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+                                        struct qman_fq *,
+                                        const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+                   const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+                   const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+        .cb.dqrr = cb_dqrr,
+        .cb.ern = cb_ern,
+        .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+        qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+        qm_fd_set_contig_big(fd, 0x0000ffff);
+        fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+        u64 t = qm_fd_addr_get64(fd);
+        int z = t >> 40;
+        unsigned int len, off;
+        enum qm_fd_format fmt;
+
+        t <<= 1;
+        if (z)
+                t |= 1;
+        qm_fd_addr_set64(fd, t);
+
+        fmt = qm_fd_get_format(fd);
+        off = qm_fd_get_offset(fd);
+        len = qm_fd_get_length(fd);
+        len--;
+        qm_fd_set_param(fd, fmt, off, len);
+
+        fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+        int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+        if (!r) {
+                enum qm_fd_format fmt_a, fmt_b;
+
+                fmt_a = qm_fd_get_format(a);
+                fmt_b = qm_fd_get_format(b);
+                r = fmt_a - fmt_b;
+        }
+        if (!r)
+                r = a->cfg - b->cfg;
+        if (!r)
+                r = a->cmd - b->cmd;
+        return r;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+        unsigned int loop;
+        int err = 0;
+
+        for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+                if (qman_enqueue(fq, &fd)) {
+                        pr_crit("qman_enqueue() failed\n");
+                        err = -EIO;
+                }
+                fd_inc(&fd);
+        }
+
+        return err;
+}
+
+int qman_test_api(void)
+{
+        unsigned int flags, frmcnt;
+        int err;
+        struct qman_fq *fq = &fq_base;
+
+        pr_info("%s(): Starting\n", __func__);
+        fd_init(&fd);
+        fd_init(&fd_dq);
+
+        /* Initialise (parked) FQ */
+        err = qman_create_fq(0, FQ_FLAGS, fq);
+        if (err) {
+                pr_crit("qman_create_fq() failed\n");
+                goto failed;
+        }
+        err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+        if (err) {
+                pr_crit("qman_init_fq() failed\n");
+                goto failed;
+        }
+        /* Do enqueues + VDQCR, twice. (Parked FQ) */
+        err = do_enqueues(fq);
+        if (err)
+                goto failed;
+        pr_info("VDQCR (till-empty);\n");
+        frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+        err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+        if (err) {
+                pr_crit("qman_volatile_dequeue() failed\n");
+                goto failed;
+        }
+        err = do_enqueues(fq);
+        if (err)
+                goto failed;
+        pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+        frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+        err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+        if (err) {
+                pr_crit("qman_volatile_dequeue() failed\n");
+                goto failed;
+        }
+        pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+                NUM_ENQUEUES);
+        frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+        err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+        if (err) {
+                pr_err("qman_volatile_dequeue() failed\n");
+                goto failed;
+        }
+
+        err = do_enqueues(fq);
+        if (err)
+                goto failed;
+        pr_info("scheduled dequeue (till-empty)\n");
+        err = qman_schedule_fq(fq);
+        if (err) {
+                pr_crit("qman_schedule_fq() failed\n");
+                goto failed;
+        }
+        wait_event(waitqueue, sdqcr_complete);
+
+        /* Retire and OOS the FQ */
+        err = qman_retire_fq(fq, &flags);
+        if (err < 0) {
+                pr_crit("qman_retire_fq() failed\n");
+                goto failed;
+        }
+        wait_event(waitqueue, retire_complete);
+        if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+                err = -EIO;
+                pr_crit("leaking frames\n");
+                goto failed;
+        }
+        err = qman_oos_fq(fq);
+        if (err) {
+                pr_crit("qman_oos_fq() failed\n");
+                goto failed;
+        }
+        qman_destroy_fq(fq);
+        pr_info("%s(): Finished\n", __func__);
+        return 0;
+
+failed:
+        WARN_ON(1);
+        return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+                                        struct qman_fq *fq,
+                                        const struct qm_dqrr_entry *dq)
+{
+        if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+                pr_err("BADNESS: dequeued frame doesn't match;\n");
+                return qman_cb_dqrr_consume;
+        }
+        fd_inc(&fd_dq);
+        if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+                sdqcr_complete = 1;
+                wake_up(&waitqueue);
+        }
+        return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+                   const union qm_mr_entry *msg)
+{
+        pr_crit("cb_ern() unimplemented");
+        WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+                   const union qm_mr_entry *msg)
+{
+        u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+        if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+                pr_crit("unexpected FQS message");
+                WARN_ON(1);
+                return;
+        }
+        pr_info("Retirement message received\n");
+        retire_complete = 1;
+        wake_up(&waitqueue);
+}
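A detail worth calling out in cb_dqrr() above: the test keeps a shadow
descriptor, fd_dq, advanced by the same fd_inc() transformation as the
enqueue-side fd, so every dequeued frame is checked for both payload and FIFO
order with a single fd_cmp(). A minimal userspace analogue of that lock-step
pattern (hypothetical helper names; the rotate only loosely mimics fd_inc()'s
handling of the 40-bit address field):

#include <stdint.h>
#include <stdio.h>

/* Deterministic "next frame" step shared by producer and checker */
static uint64_t seq_next(uint64_t x)
{
        return ((x << 1) | (x >> 39)) & 0xffffffffffULL; /* 40-bit rotate */
}

int main(void)
{
        uint64_t tx = 0xabdeadbeefULL;  /* producer state (cf. fd) */
        uint64_t rx = 0xabdeadbeefULL;  /* checker state (cf. fd_dq) */
        uint64_t fifo[10];              /* cf. NUM_ENQUEUES frames in the FQ */
        int i;

        for (i = 0; i < 10; i++) {      /* cf. do_enqueues() */
                fifo[i] = tx;
                tx = seq_next(tx);
        }
        for (i = 0; i < 10; i++) {      /* cf. cb_dqrr() per DQRR entry */
                if (fifo[i] != rx) {
                        fprintf(stderr, "out-of-order/corrupt frame %d\n", i);
                        return 1;
                }
                rx = seq_next(rx);
        }
        puts("all frames matched in order");
        return 0;
}

Because both sides step through the same sequence, any drop, duplication or
reordering shows up as a mismatch on the very next dequeue.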
drivers/soc/fsl/qbman/qman_test_stash.c (new file, +617)
@@ -0,0 +1,617 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * [identical dual BSD/GPL license text as qman_test.c above]
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ *    handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ *    and advance the iterator for the next loop. This includes a final fixup,
+ *    which connects the last handler to the first (and which is why phase 2
+ *    and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ *    hp_cpu's 'iterator' to point to its first handler. With each loop,
+ *    initialise FQ objects and advance the iterator for the next loop.
+ *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ *    initialisation targets the correct cpu.
+ */
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+        int (*fn)(void);
+        atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+        struct bstrap *bstrap = bs;
+        int err;
+
+        atomic_inc(&bstrap->started);
+        err = bstrap->fn();
+        if (err)
+                return err;
+        while (!kthread_should_stop())
+                msleep(20);
+        return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+        int cpu;
+
+        for_each_cpu(cpu, cpu_online_mask) {
+                struct bstrap bstrap = {
+                        .fn = fn,
+                        .started = ATOMIC_INIT(0)
+                };
+                struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+                        "hotpotato%d", cpu);
+                int ret;
+
+                if (IS_ERR(k))
+                        return -ENOMEM;
+                kthread_bind(k, cpu);
+                wake_up_process(k);
+                /*
+                 * If we call kthread_stop() before the "wake up" has had an
+                 * effect, then the thread may exit with -EINTR without ever
+                 * running the function. So poll until it's started before
+                 * requesting it to stop.
+                 */
+                while (!atomic_read(&bstrap.started))
+                        msleep(20);
+                ret = kthread_stop(k);
+                if (ret)
+                        return ret;
+        }
+        return 0;
+}
+
+struct hp_handler {
+
+        /* The following data is stashed when 'rx' is dequeued; */
+        /* -------------- */
+        /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+        struct qman_fq rx;
+        /* The Tx FQ we should forward to */
+        struct qman_fq tx;
+        /* The value we XOR post-dequeue, prior to validating */
+        u32 rx_mixer;
+        /* The value we XOR pre-enqueue, after validating */
+        u32 tx_mixer;
+        /* what the hotpotato address should be on dequeue */
+        dma_addr_t addr;
+        u32 *frame_ptr;
+
+        /* The following data isn't (necessarily) stashed on dequeue; */
+        /* -------------- */
+        u32 fqid_rx, fqid_tx;
+        /* list node for linking us into 'hp_cpu' */
+        struct list_head node;
+        /* Just to check ... */
+        unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+        /* identify the cpu we run on; */
+        unsigned int processor_id;
+        /* root node for the per-cpu list of handlers */
+        struct list_head handlers;
+        /* list node for linking us into 'hp_cpu_list' */
+        struct list_head node;
+        /*
+         * when repeatedly scanning 'hp_list', each time linking the n'th
+         * handlers together, this is used as per-cpu iterator state
+         */
+        struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU      2
+#define HP_LOOPS        8
+/* 80 words, i.e. 320 bytes of frame data - bleeds well beyond one cacheline */
+#define HP_NUM_WORDS    80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD   0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+        return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+        u32 lfsr = HP_FIRST_WORD;
+        int loop;
+        struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+        if (!pdev) {
+                pr_crit("platform_device_alloc() failed");
+                return -EIO;
+        }
+        if (platform_device_add(pdev)) {
+                pr_crit("platform_device_add() failed");
+                return -EIO;
+        }
+        __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+        if (!__frame_ptr)
+                return -ENOMEM;
+
+        frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+        for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+                frame_ptr[loop] = lfsr;
+                lfsr = do_lfsr(lfsr);
+        }
+        frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+                                   DMA_BIDIRECTIONAL);
+        platform_device_del(pdev);
+        platform_device_put(pdev);
+        return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+        kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+                                     const struct qm_fd *fd)
+{
+        u32 *p = handler->frame_ptr;
+        u32 lfsr = HP_FIRST_WORD;
+        int loop;
+
+        if (qm_fd_addr_get64(fd) != handler->addr) {
+                pr_crit("bad frame address");
+                return -EIO;
+        }
+        for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+                *p ^= handler->rx_mixer;
+                if (*p != lfsr) {
+                        pr_crit("corrupt frame data");
+                        return -EIO;
+                }
+                *p ^= handler->tx_mixer;
+                lfsr = do_lfsr(lfsr);
+        }
+        return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+                                            struct qman_fq *fq,
+                                            const struct qm_dqrr_entry *dqrr)
+{
+        struct hp_handler *handler = (struct hp_handler *)fq;
+
+        if (process_frame_data(handler, &dqrr->fd)) {
+                WARN_ON(1);
+                goto skip;
+        }
+        if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+                pr_crit("qman_enqueue() failed");
+                WARN_ON(1);
+        }
+skip:
+        return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+                                             struct qman_fq *fq,
+                                             const struct qm_dqrr_entry *dqrr)
+{
+        struct hp_handler *handler = (struct hp_handler *)fq;
+
+        process_frame_data(handler, &dqrr->fd);
+        if (++loop_counter < HP_LOOPS) {
+                if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+                        pr_crit("qman_enqueue() failed");
+                        WARN_ON(1);
+                        goto skip;
+                }
+        } else {
+                pr_info("Received final (%dth) frame\n", loop_counter);
+                wake_up(&queue);
+        }
+skip:
+        return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+        struct hp_handler *handler;
+        int loop;
+        struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+        hp_cpu->processor_id = smp_processor_id();
+        spin_lock(&hp_lock);
+        list_add_tail(&hp_cpu->node, &hp_cpu_list);
+        hp_cpu_list_length++;
+        spin_unlock(&hp_lock);
+        INIT_LIST_HEAD(&hp_cpu->handlers);
+        for (loop = 0; loop < HP_PER_CPU; loop++) {
+                handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+                if (!handler) {
+                        pr_crit("kmem_cache_alloc() failed");
+                        WARN_ON(1);
+                        return -EIO;
+                }
+                handler->processor_id = hp_cpu->processor_id;
+                handler->addr = frame_dma;
+                handler->frame_ptr = frame_ptr;
+                list_add_tail(&handler->node, &hp_cpu->handlers);
+        }
+        return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+        struct list_head *loop, *tmp;
+        struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+        spin_lock(&hp_lock);
+        list_del(&hp_cpu->node);
+        spin_unlock(&hp_lock);
+        list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+                u32 flags = 0;
+                struct hp_handler *handler = list_entry(loop, struct hp_handler,
+                                                        node);
+                if (qman_retire_fq(&handler->rx, &flags) ||
+                    (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+                        pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+                        WARN_ON(1);
+                        return -EIO;
+                }
+                if (qman_oos_fq(&handler->rx)) {
+                        pr_crit("qman_oos_fq(rx) failed");
+                        WARN_ON(1);
+                        return -EIO;
+                }
+                qman_destroy_fq(&handler->rx);
+                qman_destroy_fq(&handler->tx);
+                qman_release_fqid(handler->fqid_rx);
+                list_del(&handler->node);
+                kmem_cache_free(hp_handler_slab, handler);
+        }
+        return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+        u8 res = (offset + (L1_CACHE_BYTES - 1))
+                 / (L1_CACHE_BYTES);
+        if (res > 3)
+                return 3;
+        return res;
+}
+#define STASH_DATA_CL \
+        num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+        num_cachelines(offsetof(struct hp_handler, fqid_rx))
+
+static int init_handler(void *h)
+{
+        struct qm_mcc_initfq opts;
+        struct hp_handler *handler = h;
+        int err;
+
+        if (handler->processor_id != smp_processor_id()) {
+                err = -EIO;
+                goto failed;
+        }
+        /* Set up rx */
+        memset(&handler->rx, 0, sizeof(handler->rx));
+        if (handler == special_handler)
+                handler->rx.cb.dqrr = special_dqrr;
+        else
+                handler->rx.cb.dqrr = normal_dqrr;
+        err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+        if (err) {
+                pr_crit("qman_create_fq(rx) failed");
+                goto failed;
+        }
+        memset(&opts, 0, sizeof(opts));
+        opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+        opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+        qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+        err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+                           QMAN_INITFQ_FLAG_LOCAL, &opts);
+        if (err) {
+                pr_crit("qman_init_fq(rx) failed");
+                goto failed;
+        }
+        /* Set up tx */
+        memset(&handler->tx, 0, sizeof(handler->tx));
+        err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+                             &handler->tx);
+        if (err) {
+                pr_crit("qman_create_fq(tx) failed");
+                goto failed;
+        }
+
+        return 0;
+failed:
+        return err;
+}
+
+static void init_handler_cb(void *h)
+{
+        if (init_handler(h))
+                WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+        int loop;
+        u32 fqid = 0;
+        u32 lfsr = 0xdeadbeef;
+        struct hp_cpu *hp_cpu;
+        struct hp_handler *handler;
+
+        for (loop = 0; loop < HP_PER_CPU; loop++) {
+                list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+                        int err;
+
+                        if (!loop)
+                                hp_cpu->iterator = list_first_entry(
+                                                &hp_cpu->handlers,
+                                                struct hp_handler, node);
+                        else
+                                hp_cpu->iterator = list_entry(
+                                                hp_cpu->iterator->node.next,
+                                                struct hp_handler, node);
+                        /* Rx FQID is the previous handler's Tx FQID */
+                        hp_cpu->iterator->fqid_rx = fqid;
+                        /* Allocate new FQID for Tx */
+                        err = qman_alloc_fqid(&fqid);
+                        if (err) {
+                                pr_crit("qman_alloc_fqid() failed");
+                                return err;
+                        }
+                        hp_cpu->iterator->fqid_tx = fqid;
+                        /* Rx mixer is the previous handler's Tx mixer */
+                        hp_cpu->iterator->rx_mixer = lfsr;
+                        /* Get new mixer for Tx */
+                        lfsr = do_lfsr(lfsr);
+                        hp_cpu->iterator->tx_mixer = lfsr;
+                }
+        }
+        /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+        hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+        handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+        if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+                return 1;
+        handler->fqid_rx = fqid;
+        handler->rx_mixer = lfsr;
+        /* and tag it as our "special" handler */
+        special_handler = handler;
+        return 0;
+}
+
+static int init_phase3(void)
+{
+        int loop, err;
+        struct hp_cpu *hp_cpu;
+
+        for (loop = 0; loop < HP_PER_CPU; loop++) {
+                list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+                        if (!loop)
+                                hp_cpu->iterator = list_first_entry(
+                                                &hp_cpu->handlers,
+                                                struct hp_handler, node);
+                        else
+                                hp_cpu->iterator = list_entry(
+                                                hp_cpu->iterator->node.next,
+                                                struct hp_handler, node);
+                        preempt_disable();
+                        if (hp_cpu->processor_id == smp_processor_id()) {
+                                err = init_handler(hp_cpu->iterator);
+                                if (err)
+                                        return err;
+                        } else {
+                                smp_call_function_single(hp_cpu->processor_id,
+                                        init_handler_cb, hp_cpu->iterator, 1);
+                        }
+                        preempt_enable();
+                }
+        }
+        return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+        u32 *p = special_handler->frame_ptr;
+        u32 lfsr = HP_FIRST_WORD;
+        int loop, err;
+        struct qm_fd fd;
+
+        if (special_handler->processor_id != smp_processor_id()) {
+                err = -EIO;
+                goto failed;
+        }
+        memset(&fd, 0, sizeof(fd));
+        qm_fd_addr_set64(&fd, special_handler->addr);
+        qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+        for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+                if (*p != lfsr) {
+                        err = -EIO;
+                        pr_crit("corrupt frame data");
+                        goto failed;
+                }
+                *p ^= special_handler->tx_mixer;
+                lfsr = do_lfsr(lfsr);
+        }
+        pr_info("Sending first frame\n");
+        err = qman_enqueue(&special_handler->tx, &fd);
+        if (err) {
+                pr_crit("qman_enqueue() failed");
+                goto failed;
+        }
+
+        return 0;
+failed:
+        return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+        if (send_first_frame(NULL))
+                WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+        int err;
+
+        if (cpumask_weight(cpu_online_mask) < 2) {
+                pr_info("%s(): skip - only 1 CPU\n", __func__);
+                return 0;
+        }
+
+        pr_info("%s(): Starting\n", __func__);
+
+        hp_cpu_list_length = 0;
+        loop_counter = 0;
+        hp_handler_slab = kmem_cache_create("hp_handler_slab",
+                        sizeof(struct hp_handler), L1_CACHE_BYTES,
+                        SLAB_HWCACHE_ALIGN, NULL);
+        if (!hp_handler_slab) {
+                err = -EIO;
+                pr_crit("kmem_cache_create() failed");
+                goto failed;
+        }
+
+        err = allocate_frame_data();
+        if (err)
+                goto failed;
+
+        /* Init phase 1 */
+        pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+        if (on_all_cpus(create_per_cpu_handlers)) {
+                err = -EIO;
+                pr_crit("on_each_cpu() failed");
+                goto failed;
+        }
+        pr_info("Number of cpus: %d, total of %d handlers\n",
+                hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+        err = init_phase2();
+        if (err)
+                goto failed;
+
+        err = init_phase3();
+        if (err)
+                goto failed;
+
+        preempt_disable();
+        if (special_handler->processor_id == smp_processor_id()) {
+                err = send_first_frame(NULL);
+                if (err)
+                        goto failed;
+        } else {
+                smp_call_function_single(special_handler->processor_id,
+                                         send_first_frame_cb, NULL, 1);
+        }
+        preempt_enable();
+
+        wait_event(queue, loop_counter == HP_LOOPS);
+        deallocate_frame_data();
+        if (on_all_cpus(destroy_per_cpu_handlers)) {
+                err = -EIO;
+                pr_crit("on_each_cpu() failed");
+                goto failed;
+        }
+        kmem_cache_destroy(hp_handler_slab);
+        pr_info("%s(): Finished\n", __func__);
+
+        return 0;
+failed:
+        WARN_ON(1);
+        return err;
+}
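Two details of the stash test reward a closer look. First, the mixer scheme:
each handler's rx_mixer equals the previous handler's tx_mixer (all drawn from
the same Galois LFSR, do_lfsr(), seeded with 0xdeadbeef in init_phase2()), so
XORing rx_mixer into the received frame exactly undoes the sender's mix before
the handler applies its own. A standalone sketch of that round trip over an
illustrative 4-hop chain (same do_lfsr() as above; the chain length and the
single-word payload are simplifications):

#include <stdint.h>
#include <stdio.h>

/* Same Galois LFSR step as do_lfsr() in qman_test_stash.c */
static uint32_t do_lfsr(uint32_t prev)
{
        return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}

int main(void)
{
        uint32_t word = 0xabbaf00d;     /* HP_FIRST_WORD: first payload word */
        uint32_t mixer = 0xdeadbeef;    /* LFSR seed from init_phase2() */
        int hop;

        for (hop = 0; hop < 4; hop++) {
                uint32_t tx_mixer = do_lfsr(mixer);

                word ^= tx_mixer;       /* sender mixes before enqueue */
                word ^= tx_mixer;       /* receiver's rx_mixer == sender's
                                         * tx_mixer, so this unmixes it */
                if (word != 0xabbaf00d) {
                        fprintf(stderr, "hop %d: corrupt frame data\n", hop);
                        return 1;
                }
                mixer = tx_mixer;       /* becomes the next hop's rx_mixer */
        }
        puts("potato survived the chain intact");
        return 0;
}

Second, the stashing arithmetic: with 64-byte cachelines, STASH_DATA_CL asks
for ceil(HP_NUM_WORDS * 4 / 64) = ceil(320 / 64) = 5 cachelines, which
num_cachelines() clamps to its maximum of 3, so only the first 192 bytes of
the potato are stashed; STASH_CTX_CL covers the hp_handler fields up to
fqid_rx, i.e. exactly the state a handler touches on the forwarding fast path.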