1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2015 Intel Corporation. All rights reserved.
8 * Copyright(c) 2017 T-Platforms. All Rights Reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * BSD LICENSE
15 *
16 * Copyright(c) 2015 Intel Corporation. All rights reserved.
17 * Copyright(c) 2017 T-Platforms. All Rights Reserved.
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 *
23 * * Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer.
25 * * Redistributions in binary form must reproduce the above copyright
26 * notice, this list of conditions and the following disclaimer in
27 * the documentation and/or other materials provided with the
28 * distribution.
29 * * Neither the name of Intel Corporation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44 *
45 * PCIe NTB Perf Linux driver
46 */
47
48/*
49 * How to use this tool, by example.
50 *
51 * Assuming $DBG_DIR is something like:
52 * '/sys/kernel/debug/ntb_perf/0000:00:03.0'
53 * Suppose that, aside from the local device, there is at least one remote
54 * device connected to the NTB, with peer index 0.
55 *-----------------------------------------------------------------------------
56 * Eg: install the driver with the specified chunk/total orders and the dma-enabled flag
57 *
58 * root@self# insmod ntb_perf.ko chunk_order=19 total_order=28 use_dma
59 *-----------------------------------------------------------------------------
60 * Eg: check NTB ports (index) and MW mapping information
61 *
62 * root@self# cat $DBG_DIR/info
63 *-----------------------------------------------------------------------------
64 * Eg: start performance test with peer (index 0) and get the test metrics
65 *
66 * root@self# echo 0 > $DBG_DIR/run
67 * root@self# cat $DBG_DIR/run
68 */
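/*
 * Illustrative addition (not part of the original usage notes): the number of
 * work-threads can be tuned before starting a run via the threads_count node,
 * e.g.
 *
 * root@self# echo 8 > $DBG_DIR/threads_count
 *
 * Valid values are 1..MAX_THREADS_CNT (32); the default is 1.
 */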
69
70#include <linux/init.h>
71#include <linux/kernel.h>
72#include <linux/module.h>
73#include <linux/sched.h>
74#include <linux/wait.h>
75#include <linux/dma-mapping.h>
76#include <linux/dmaengine.h>
77#include <linux/pci.h>
78#include <linux/ktime.h>
79#include <linux/slab.h>
80#include <linux/delay.h>
81#include <linux/sizes.h>
82#include <linux/workqueue.h>
83#include <linux/debugfs.h>
84#include <linux/random.h>
85#include <linux/ntb.h>
86
87#define DRIVER_NAME "ntb_perf"
88#define DRIVER_VERSION "2.0"
89
90MODULE_LICENSE("Dual BSD/GPL");
91MODULE_VERSION(DRIVER_VERSION);
92MODULE_AUTHOR("Dave Jiang <dave.jiang@intel.com>");
93MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");
94
95#define MAX_THREADS_CNT 32
96#define DEF_THREADS_CNT 1
97#define MAX_CHUNK_SIZE SZ_1M
98#define MAX_CHUNK_ORDER 20 /* no larger than 1M */
99
100#define DMA_TRIES 100
101#define DMA_MDELAY 10
102
103#define MSG_TRIES 1000
104#define MSG_UDELAY_LOW 1000
105#define MSG_UDELAY_HIGH 2000
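/*
 * Note (added for clarity): with the values above a DMA descriptor allocation
 * is retried for up to ~1s (100 tries x 10 ms) and a command send for up to
 * ~1-2s (1000 tries x 1-2 ms) before the operation gives up.
 */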
106
107#define PERF_BUF_LEN 1024
108
109static unsigned long max_mw_size;
110module_param(max_mw_size, ulong, 0644);
111MODULE_PARM_DESC(max_mw_size, "Upper limit of memory window size");
112
113static unsigned char chunk_order = 19; /* 512K */
114module_param(chunk_order, byte, 0644);
115MODULE_PARM_DESC(chunk_order, "Data chunk order [2^n] to transfer");
116
117static unsigned char total_order = 30; /* 1G */
118module_param(total_order, byte, 0644);
119MODULE_PARM_DESC(total_order, "Total data order [2^n] to transfer");
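/*
 * Note (added for clarity): with the defaults above each test thread copies
 * 2^30 bytes (1 GiB) in 2^19-byte (512 KiB) chunks; the header example
 * (chunk_order=19 total_order=28) copies 256 MiB per thread instead.
 */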
120
121static bool use_dma; /* default to 0 */
122module_param(use_dma, bool, 0644);
123MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");
124
125/*==============================================================================
126 * Perf driver data definition
127 *==============================================================================
128 */
129
130enum perf_cmd {
131 PERF_CMD_INVAL = -1,/* invalid spad command */
132 PERF_CMD_SSIZE = 0, /* send out buffer size */
133 PERF_CMD_RSIZE = 1, /* recv in buffer size */
134 PERF_CMD_SXLAT = 2, /* send in buffer xlat */
135 PERF_CMD_RXLAT = 3, /* recv out buffer xlat */
136 PERF_CMD_CLEAR = 4, /* clear allocated memory */
137 PERF_STS_DONE = 5, /* init is done */
138 PERF_STS_LNKUP = 6, /* link up state flag */
139};
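/*
 * Note (added for clarity): the PERF_CMD_* values are transferred over the
 * scratchpad/message registers, while PERF_STS_DONE and PERF_STS_LNKUP are
 * local status flags only; both kinds share the peer->sts bitfield (see
 * perf_cmd_exec() and perf_service_work()).
 */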
140
141struct perf_ctx;
142
143struct perf_peer {
144 struct perf_ctx *perf;
145 int pidx;
146 int gidx;
147
148 /* Outbound MW params */
149 u64 outbuf_xlat;
150 resource_size_t outbuf_size;
151 void __iomem *outbuf;
152 phys_addr_t out_phys_addr;
153 dma_addr_t dma_dst_addr;
154 /* Inbound MW params */
155 dma_addr_t inbuf_xlat;
156 resource_size_t inbuf_size;
157 void *inbuf;
158
159 /* NTB connection setup service */
160 struct work_struct service;
161 unsigned long sts;
162};
163#define to_peer_service(__work) \
164 container_of(__work, struct perf_peer, service)
165
166struct perf_thread {
167 struct perf_ctx *perf;
168 int tidx;
169
170 /* DMA-based test sync parameters */
171 atomic_t dma_sync;
172 wait_queue_head_t dma_wait;
173 struct dma_chan *dma_chan;
174
175 /* Data source and measured statistics */
176 void *src;
177 u64 copied;
178 ktime_t duration;
179 int status;
180 struct work_struct work;
181};
182#define to_thread_work(__work) \
183 container_of(__work, struct perf_thread, work)
184
185struct perf_ctx {
186 struct ntb_dev *ntb;
187
188 /* Global device index and peer descriptors */
189 int gidx;
190 int pcnt;
191 struct perf_peer *peers;
192
193 /* Performance measuring work-threads interface */
194 unsigned long busy_flag;
195 wait_queue_head_t twait;
196 atomic_t tsync;
197 u8 tcnt;
198 struct perf_peer *test_peer;
199 struct perf_thread threads[MAX_THREADS_CNT];
200
201 /* Scratchpad/Message IO operations */
202 int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
203 int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
204 u64 *data);
205
206 struct dentry *dbgfs_dir;
207};
208
209/*
210 * Scratchpad-based command interface
211 */
212#define PERF_SPAD_CNT(_pcnt) \
213 (3*((_pcnt) + 1))
214#define PERF_SPAD_CMD(_gidx) \
215 (3*(_gidx))
216#define PERF_SPAD_LDATA(_gidx) \
217 (3*(_gidx) + 1)
218#define PERF_SPAD_HDATA(_gidx) \
219 (3*(_gidx) + 2)
220#define PERF_SPAD_NOTIFY(_gidx) \
221 (BIT_ULL(_gidx))
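/*
 * Layout example (derived from the macros above): a device with global index
 * 1 uses scratchpads 3, 4 and 5 for the command, low and high data words
 * respectively, and is notified via doorbell bit 1.
 */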
222
223/*
224 * Message-based command interface
225 */
226#define PERF_MSG_CNT 3
227#define PERF_MSG_CMD 0
228#define PERF_MSG_LDATA 1
229#define PERF_MSG_HDATA 2
230
231/*==============================================================================
232 * Static data declarations
233 *==============================================================================
234 */
235
236static struct dentry *perf_dbgfs_topdir;
237
238static struct workqueue_struct *perf_wq __read_mostly;
239
240/*==============================================================================
241 * NTB cross-link commands execution service
242 *==============================================================================
243 */
244
245static void perf_terminate_test(struct perf_ctx *perf);
246
247static inline bool perf_link_is_up(struct perf_peer *peer)
248{
249 u64 link;
250
251 link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
252 return !!(link & BIT_ULL_MASK(peer->pidx));
253}
254
255static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
256 u64 data)
257{
258 struct perf_ctx *perf = peer->perf;
259 int try;
260 u32 sts;
261
262 dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
263
264 /*
265 * Perform a predefined number of attempts before giving up. The data
266 * is sent to the port-specific scratchpad to prevent a multi-port
267 * access race condition. Additionally, there is no need for local
268 * locking, since this method is only used by the thread-safe service
269 * work.
270 */
271 for (try = 0; try < MSG_TRIES; try++) {
272 if (!perf_link_is_up(peer))
273 return -ENOLINK;
274
275 sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
276 PERF_SPAD_CMD(perf->gidx));
277 if (sts != PERF_CMD_INVAL) {
278 usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
279 continue;
280 }
281
282 ntb_peer_spad_write(perf->ntb, peer->pidx,
283 PERF_SPAD_LDATA(perf->gidx),
284 lower_32_bits(data));
285 ntb_peer_spad_write(perf->ntb, peer->pidx,
286 PERF_SPAD_HDATA(perf->gidx),
287 upper_32_bits(data));
288 ntb_peer_spad_write(perf->ntb, peer->pidx,
289 PERF_SPAD_CMD(perf->gidx),
290 cmd);
291 ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
292
293 dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
294 PERF_SPAD_NOTIFY(peer->gidx));
295
296 break;
297 }
298
299 return try < MSG_TRIES ? 0 : -EAGAIN;
300}
301
302static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx,
303 enum perf_cmd *cmd, u64 *data)
304{
305 struct perf_peer *peer;
306 u32 val;
307
308 ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
309
310 /*
311 * We scan all the peers from the start, since the cleared DB bit may
312 * have been set by any of them. This gives peers with smaller indexes
313 * higher service priority, but it keeps the scratchpad and message
314 * code unified and simple.
315 */
316 for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) {
317 peer = &perf->peers[*pidx];
318
319 if (!perf_link_is_up(peer))
320 continue;
321
322 val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
323 if (val == PERF_CMD_INVAL)
324 continue;
325
326 *cmd = val;
327
328 val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
329 *data = val;
330
331 val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
332 *data |= (u64)val << 32;
333
334 /* The next command can be retrieved from now on */
335 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
336 PERF_CMD_INVAL);
337
338 dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
339
340 return 0;
341 }
342
343 return -ENODATA;
344}
345
346static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
347 u64 data)
348{
349 struct perf_ctx *perf = peer->perf;
350 int try, ret;
351 u64 outbits;
352
353 dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
354
355 /*
356 * Perform a predefined number of attempts before giving up. Message
357 * registers are free of race conditions when accessed from different
358 * ports, so there is no need to split the registers by global device
359 * index. Local locking is also unnecessary, since this method is only
360 * used by the service work.
361 */
362 outbits = ntb_msg_outbits(perf->ntb);
363 for (try = 0; try < MSG_TRIES; try++) {
364 if (!perf_link_is_up(peer))
365 return -ENOLINK;
366
367 ret = ntb_msg_clear_sts(perf->ntb, outbits);
368 if (ret)
369 return ret;
370
371 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
372 lower_32_bits(data));
373
374 if (ntb_msg_read_sts(perf->ntb) & outbits) {
375 usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
376 continue;
377 }
378
379 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
380 upper_32_bits(data));
381
382 /* This call shall trigger peer message event */
383 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
384
385 break;
386 }
387
388 return try < MSG_TRIES ? 0 : -EAGAIN;
389}
390
391static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx,
392 enum perf_cmd *cmd, u64 *data)
393{
394 u64 inbits;
395 u32 val;
396
397 inbits = ntb_msg_inbits(perf->ntb);
398
399 if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3)
400 return -ENODATA;
401
402 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD);
403 *cmd = val;
404
405 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA);
406 *data = val;
407
408 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA);
409 *data |= (u64)val << 32;
410
411 /* The next command can be retrieved from now on */
412 ntb_msg_clear_sts(perf->ntb, inbits);
413
414 dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
415
416 return 0;
417}
418
419static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
420{
421 struct perf_ctx *perf = peer->perf;
422
423 if (cmd == PERF_CMD_SSIZE || cmd == PERF_CMD_SXLAT)
424 return perf->cmd_send(peer, cmd, data);
425
426 dev_err(&perf->ntb->dev, "Send invalid command\n");
427 return -EINVAL;
428}
429
430static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
431{
432 switch (cmd) {
433 case PERF_CMD_SSIZE:
434 case PERF_CMD_RSIZE:
435 case PERF_CMD_SXLAT:
436 case PERF_CMD_RXLAT:
437 case PERF_CMD_CLEAR:
438 break;
439 default:
440 dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
441 return -EINVAL;
442 }
443
444 /* No memory barrier is needed, since bit ops have an internal lock */
445 set_bit(cmd, &peer->sts);
446
447 dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);
448
449 (void)queue_work(system_highpri_wq, &peer->service);
450
451 return 0;
452}
453
454static int perf_cmd_recv(struct perf_ctx *perf)
455{
456 struct perf_peer *peer;
457 int ret, pidx, cmd;
458 u64 data;
459
460 while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) {
461 peer = &perf->peers[pidx];
462
463 switch (cmd) {
464 case PERF_CMD_SSIZE:
465 peer->inbuf_size = data;
466 return perf_cmd_exec(peer, PERF_CMD_RSIZE);
467 case PERF_CMD_SXLAT:
468 peer->outbuf_xlat = data;
469 return perf_cmd_exec(peer, PERF_CMD_RXLAT);
470 default:
471 dev_err(&perf->ntb->dev, "Recv invalid command\n");
472 return -EINVAL;
473 }
474 }
475
476 /* Return 0 if no data left to process, otherwise an error */
477 return ret == -ENODATA ? 0 : ret;
478}
479
480static void perf_link_event(void *ctx)
481{
482 struct perf_ctx *perf = ctx;
483 struct perf_peer *peer;
484 bool lnk_up;
485 int pidx;
486
487 for (pidx = 0; pidx < perf->pcnt; pidx++) {
488 peer = &perf->peers[pidx];
489
490 lnk_up = perf_link_is_up(peer);
491
492 if (lnk_up &&
493 !test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
494 perf_cmd_exec(peer, PERF_CMD_SSIZE);
495 } else if (!lnk_up &&
496 test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
497 perf_cmd_exec(peer, PERF_CMD_CLEAR);
498 }
499 }
500}
501
502static void perf_db_event(void *ctx, int vec)
503{
504 struct perf_ctx *perf = ctx;
505
506 dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec,
507 ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb));
508
509 /* Just receive all available commands */
510 (void)perf_cmd_recv(perf);
511}
512
513static void perf_msg_event(void *ctx)
514{
515 struct perf_ctx *perf = ctx;
516
517 dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n",
518 ntb_msg_read_sts(perf->ntb));
519
520 /* Messages are only sent one-by-one */
521 (void)perf_cmd_recv(perf);
522}
523
524static const struct ntb_ctx_ops perf_ops = {
525 .link_event = perf_link_event,
526 .db_event = perf_db_event,
527 .msg_event = perf_msg_event
528};
529
530static void perf_free_outbuf(struct perf_peer *peer)
531{
532 (void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
533}
534
535static int perf_setup_outbuf(struct perf_peer *peer)
536{
537 struct perf_ctx *perf = peer->perf;
538 int ret;
539
540 /* Outbuf size can be unaligned due to custom max_mw_size */
541 ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
542 peer->outbuf_xlat, peer->outbuf_size);
543 if (ret) {
544 dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n");
545 return ret;
546 }
547
548 /* Initialization is finally done */
549 set_bit(PERF_STS_DONE, &peer->sts);
550
551 return 0;
552}
553
554static void perf_free_inbuf(struct perf_peer *peer)
555{
556 if (!peer->inbuf)
557 return;
558
559 (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
560 dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
561 peer->inbuf, peer->inbuf_xlat);
562 peer->inbuf = NULL;
563}
564
565static int perf_setup_inbuf(struct perf_peer *peer)
566{
567 resource_size_t xlat_align, size_align, size_max;
568 struct perf_ctx *perf = peer->perf;
569 int ret;
570
571 /* Get inbound MW parameters */
572 ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
573 &xlat_align, &size_align, &size_max);
574 if (ret) {
575 dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n");
576 return ret;
577 }
578
579 if (peer->inbuf_size > size_max) {
580 dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n",
581 &peer->inbuf_size, &size_max);
582 return -EINVAL;
583 }
584
585 peer->inbuf_size = round_up(peer->inbuf_size, size_align);
586
587 perf_free_inbuf(peer);
588
589 peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
590 &peer->inbuf_xlat, GFP_KERNEL);
591 if (!peer->inbuf) {
592 dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
593 &peer->inbuf_size);
594 return -ENOMEM;
595 }
596 if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
597 dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
598 goto err_free_inbuf;
599 }
600
601 ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
602 peer->inbuf_xlat, peer->inbuf_size);
603 if (ret) {
604 dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n");
605 goto err_free_inbuf;
606 }
607
608 /*
609 * The inbuf xlat transmission command is submitted here to follow the
610 * code architecture, even though this method is called from the service
611 * work itself, so the command will run right after this function returns.
612 */
613 (void)perf_cmd_exec(peer, PERF_CMD_SXLAT);
614
615 return 0;
616
617err_free_inbuf:
618 perf_free_inbuf(peer);
619
620 return ret;
621}
622
623static void perf_service_work(struct work_struct *work)
624{
625 struct perf_peer *peer = to_peer_service(work);
626
627 if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
628 perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);
629
630 if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
631 perf_setup_inbuf(peer);
632
633 if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
634 perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);
635
636 if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
637 perf_setup_outbuf(peer);
638
639 if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
640 clear_bit(PERF_STS_DONE, &peer->sts);
641 if (test_bit(0, &peer->perf->busy_flag) &&
642 peer == peer->perf->test_peer) {
643 dev_warn(&peer->perf->ntb->dev,
644 "Freeing while test on-fly\n");
645 perf_terminate_test(peer->perf);
646 }
647 perf_free_outbuf(peer);
648 perf_free_inbuf(peer);
649 }
650}
651
652static int perf_init_service(struct perf_ctx *perf)
653{
654 u64 mask;
655
656 if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) {
657 dev_err(&perf->ntb->dev, "Not enough memory windows\n");
658 return -EINVAL;
659 }
660
661 if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) {
662 perf->cmd_send = perf_msg_cmd_send;
663 perf->cmd_recv = perf_msg_cmd_recv;
664
665 dev_dbg(&perf->ntb->dev, "Message service initialized\n");
666
667 return 0;
668 }
669
670 dev_dbg(&perf->ntb->dev, "Message service unsupported\n");
671
672 mask = GENMASK_ULL(perf->pcnt, 0);
673 if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) &&
674 (ntb_db_valid_mask(perf->ntb) & mask) == mask) {
675 perf->cmd_send = perf_spad_cmd_send;
676 perf->cmd_recv = perf_spad_cmd_recv;
677
678 dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n");
679
680 return 0;
681 }
682
683 dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n");
684
685 dev_err(&perf->ntb->dev, "Command services unsupported\n");
686
687 return -EINVAL;
688}
689
690static int perf_enable_service(struct perf_ctx *perf)
691{
692 u64 mask, incmd_bit;
693 int ret, sidx, scnt;
694
695 mask = ntb_db_valid_mask(perf->ntb);
696 (void)ntb_db_set_mask(perf->ntb, mask);
697
698 ret = ntb_set_ctx(perf->ntb, perf, &perf_ops);
699 if (ret)
700 return ret;
701
702 if (perf->cmd_send == perf_msg_cmd_send) {
703 u64 inbits, outbits;
704
705 inbits = ntb_msg_inbits(perf->ntb);
706 outbits = ntb_msg_outbits(perf->ntb);
707 (void)ntb_msg_set_mask(perf->ntb, inbits | outbits);
708
709 incmd_bit = BIT_ULL(__ffs64(inbits));
710 ret = ntb_msg_clear_mask(perf->ntb, incmd_bit);
711
712 dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit);
713 } else {
714 scnt = ntb_spad_count(perf->ntb);
715 for (sidx = 0; sidx < scnt; sidx++)
716 ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL);
717 incmd_bit = PERF_SPAD_NOTIFY(perf->gidx);
718 ret = ntb_db_clear_mask(perf->ntb, incmd_bit);
719
720 dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit);
721 }
722 if (ret) {
723 ntb_clear_ctx(perf->ntb);
724 return ret;
725 }
726
727 ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
728 /* Might not be necessary */
729 ntb_link_event(perf->ntb);
730
731 return 0;
732}
733
734static void perf_disable_service(struct perf_ctx *perf)
735{
736 int pidx;
737
738 if (perf->cmd_send == perf_msg_cmd_send) {
739 u64 inbits;
740
741 inbits = ntb_msg_inbits(perf->ntb);
742 (void)ntb_msg_set_mask(perf->ntb, inbits);
743 } else {
744 (void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
745 }
746
747 ntb_clear_ctx(perf->ntb);
748
749 for (pidx = 0; pidx < perf->pcnt; pidx++)
750 perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR);
751
752 for (pidx = 0; pidx < perf->pcnt; pidx++)
753 flush_work(&perf->peers[pidx].service);
754
755 for (pidx = 0; pidx < perf->pcnt; pidx++) {
756 struct perf_peer *peer = &perf->peers[pidx];
757
758 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
759 }
760
761 ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
762
763 ntb_link_disable(perf->ntb);
764}
765
766/*==============================================================================
767 * Performance measuring work-thread
768 *==============================================================================
769 */
770
771static void perf_dma_copy_callback(void *data)
772{
773 struct perf_thread *pthr = data;
774
775 atomic_dec(&pthr->dma_sync);
776 wake_up(&pthr->dma_wait);
777}
778
779static int perf_copy_chunk(struct perf_thread *pthr,
780 void __iomem *dst, void *src, size_t len)
781{
782 struct dma_async_tx_descriptor *tx;
783 struct dmaengine_unmap_data *unmap;
784 struct device *dma_dev;
785 int try = 0, ret = 0;
786 struct perf_peer *peer = pthr->perf->test_peer;
787 void __iomem *vbase;
788 void __iomem *dst_vaddr;
789 dma_addr_t dst_dma_addr;
790
791 if (!use_dma) {
792 memcpy_toio(dst, src, len);
793 goto ret_check_tsync;
794 }
795
796 dma_dev = pthr->dma_chan->device->dev;
797
798 if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
799 offset_in_page(dst), len))
800 return -EIO;
801
802 vbase = peer->outbuf;
803 dst_vaddr = dst;
804 dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
805
806 unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
807 if (!unmap)
808 return -ENOMEM;
809
810 unmap->len = len;
811 unmap->addr[0] = dma_map_page(dma_dev, virt_to_page(src),
812 offset_in_page(src), len, DMA_TO_DEVICE);
813 if (dma_mapping_error(dma_dev, unmap->addr[0])) {
814 ret = -EIO;
815 goto err_free_resource;
816 }
817 unmap->to_cnt = 1;
818
819 unmap->addr[1] = dst_dma_addr;
820 if (dma_mapping_error(dma_dev, unmap->addr[1])) {
821 ret = -EIO;
822 goto err_free_resource;
823 }
824 unmap->from_cnt = 1;
825
826 do {
827 tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, unmap->addr[1],
828 unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
829 if (!tx)
830 msleep(DMA_MDELAY);
831 } while (!tx && (try++ < DMA_TRIES));
832
833 if (!tx) {
834 ret = -EIO;
835 goto err_free_resource;
836 }
837
838 tx->callback = perf_dma_copy_callback;
839 tx->callback_param = pthr;
840 dma_set_unmap(tx, unmap);
841
842 ret = dma_submit_error(dmaengine_submit(tx));
843 if (ret) {
844 dmaengine_unmap_put(unmap);
845 goto err_free_resource;
846 }
847
848 dmaengine_unmap_put(unmap);
849
850 atomic_inc(&pthr->dma_sync);
851 dma_async_issue_pending(pthr->dma_chan);
852
853ret_check_tsync:
854 return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;
855
856err_free_resource:
857 dmaengine_unmap_put(unmap);
858
859 return ret;
860}
861
862static bool perf_dma_filter(struct dma_chan *chan, void *data)
863{
864 struct perf_ctx *perf = data;
865 int node;
866
867 node = dev_to_node(&perf->ntb->dev);
868
869 return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
870}
871
872static int perf_init_test(struct perf_thread *pthr)
873{
874 struct perf_ctx *perf = pthr->perf;
875 dma_cap_mask_t dma_mask;
876 struct perf_peer *peer = pthr->perf->test_peer;
877
878 pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
879 dev_to_node(&perf->ntb->dev));
880 if (!pthr->src)
881 return -ENOMEM;
882
883 get_random_bytes(pthr->src, perf->test_peer->outbuf_size);
884
885 if (!use_dma)
886 return 0;
887
888 dma_cap_zero(dma_mask);
889 dma_cap_set(DMA_MEMCPY, dma_mask);
890 pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
891 if (!pthr->dma_chan) {
892 dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
893 pthr->tidx);
894 goto err_free;
895 }
896 peer->dma_dst_addr =
897 dma_map_resource(pthr->dma_chan->device->dev,
898 peer->out_phys_addr, peer->outbuf_size,
899 DMA_FROM_DEVICE, 0);
900 if (dma_mapping_error(pthr->dma_chan->device->dev,
901 peer->dma_dst_addr)) {
902 dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n",
903 pthr->tidx);
904 peer->dma_dst_addr = 0;
905 dma_release_channel(pthr->dma_chan);
906 goto err_free;
907 }
908 dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
909 pthr->tidx,
910 &peer->out_phys_addr,
911 &peer->dma_dst_addr);
912
913 atomic_set(&pthr->dma_sync, 0);
914 return 0;
915
916err_free:
917 atomic_dec(&perf->tsync);
918 wake_up(&perf->twait);
919 kfree(pthr->src);
920 return -ENODEV;
921}
922
923static int perf_run_test(struct perf_thread *pthr)
924{
925 struct perf_peer *peer = pthr->perf->test_peer;
926 struct perf_ctx *perf = pthr->perf;
927 void __iomem *flt_dst, *bnd_dst;
928 u64 total_size, chunk_size;
929 void *flt_src;
930 int ret = 0;
931
932 total_size = 1ULL << total_order;
933 chunk_size = 1ULL << chunk_order;
934 chunk_size = min_t(u64, peer->outbuf_size, chunk_size);
935
936 flt_src = pthr->src;
937 bnd_dst = peer->outbuf + peer->outbuf_size;
938 flt_dst = peer->outbuf;
939
940 pthr->duration = ktime_get();
941
942 /* The copied field is cleared at the test launch stage */
943 while (pthr->copied < total_size) {
944 ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
945 if (ret) {
946 dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
947 pthr->tidx, ret);
948 return ret;
949 }
950
951 pthr->copied += chunk_size;
952
953 flt_dst += chunk_size;
954 flt_src += chunk_size;
955 if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
956 flt_dst = peer->outbuf;
957 flt_src = pthr->src;
958 }
959
960 /* Yield the CPU to give other threads a chance to run */
961 schedule();
962 }
963
964 return 0;
965}
966
967static int perf_sync_test(struct perf_thread *pthr)
968{
969 struct perf_ctx *perf = pthr->perf;
970
971 if (!use_dma)
972 goto no_dma_ret;
973
974 wait_event(pthr->dma_wait,
975 (atomic_read(&pthr->dma_sync) == 0 ||
976 atomic_read(&perf->tsync) < 0));
977
978 if (atomic_read(&perf->tsync) < 0)
979 return -EINTR;
980
981no_dma_ret:
982 pthr->duration = ktime_sub(ktime_get(), pthr->duration);
983
984 dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n",
985 pthr->tidx, pthr->copied);
986
987 dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n",
988 pthr->tidx, ktime_to_us(pthr->duration));
989
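	/* Copied bytes per elapsed microsecond is numerically MBytes/s */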
990 dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
991 div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
992
993 return 0;
994}
995
996static void perf_clear_test(struct perf_thread *pthr)
997{
998 struct perf_ctx *perf = pthr->perf;
999
1000 if (!use_dma)
1001 goto no_dma_notify;
1002
1003 /*
1004 * If the test finished without errors, termination isn't needed, but
1005 * we call it anyway just to be sure all the transfers have completed.
1006 */
1007 (void)dmaengine_terminate_sync(pthr->dma_chan);
1008 if (pthr->perf->test_peer->dma_dst_addr)
1009 dma_unmap_resource(pthr->dma_chan->device->dev,
1010 pthr->perf->test_peer->dma_dst_addr,
1011 pthr->perf->test_peer->outbuf_size,
1012 DMA_FROM_DEVICE, 0);
1013 if (pthr->dma_chan)
1014 dma_release_channel(pthr->dma_chan);
1015
1016no_dma_notify:
1017 atomic_dec(&perf->tsync);
1018 wake_up(&perf->twait);
1019 kfree(pthr->src);
1020}
1021
1022static void perf_thread_work(struct work_struct *work)
1023{
1024 struct perf_thread *pthr = to_thread_work(work);
1025 int ret;
1026
1027 /*
1028 * Perform the stages according to the use_dma flag value. The test
1029 * status is changed only if an error happened; otherwise the -ENODATA
1030 * status is kept while the test is in progress. Results
1031 * synchronization is performed only if the test finished without an
1032 * error or interruption.
1033 */
1034 ret = perf_init_test(pthr);
1035 if (ret) {
1036 pthr->status = ret;
1037 return;
1038 }
1039
1040 ret = perf_run_test(pthr);
1041 if (ret) {
1042 pthr->status = ret;
1043 goto err_clear_test;
1044 }
1045
1046 pthr->status = perf_sync_test(pthr);
1047
1048err_clear_test:
1049 perf_clear_test(pthr);
1050}
1051
1052static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt)
1053{
1054 if (tcnt == 0 || tcnt > MAX_THREADS_CNT)
1055 return -EINVAL;
1056
1057 if (test_and_set_bit_lock(0, &perf->busy_flag))
1058 return -EBUSY;
1059
1060 perf->tcnt = tcnt;
1061
1062 clear_bit_unlock(0, &perf->busy_flag);
1063
1064 return 0;
1065}
1066
1067static void perf_terminate_test(struct perf_ctx *perf)
1068{
1069 int tidx;
1070
1071 atomic_set(&perf->tsync, -1);
1072 wake_up(&perf->twait);
1073
1074 for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1075 wake_up(&perf->threads[tidx].dma_wait);
1076 cancel_work_sync(&perf->threads[tidx].work);
1077 }
1078}
1079
1080static int perf_submit_test(struct perf_peer *peer)
1081{
1082 struct perf_ctx *perf = peer->perf;
1083 struct perf_thread *pthr;
1084 int tidx, ret;
1085
1086 if (!test_bit(PERF_STS_DONE, &peer->sts))
1087 return -ENOLINK;
1088
1089 if (test_and_set_bit_lock(0, &perf->busy_flag))
1090 return -EBUSY;
1091
1092 perf->test_peer = peer;
1093 atomic_set(&perf->tsync, perf->tcnt);
1094
1095 for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1096 pthr = &perf->threads[tidx];
1097
1098 pthr->status = -ENODATA;
1099 pthr->copied = 0;
1100 pthr->duration = ktime_set(0, 0);
1101 if (tidx < perf->tcnt)
1102 (void)queue_work(perf_wq, &pthr->work);
1103 }
1104
1105 ret = wait_event_interruptible(perf->twait,
1106 atomic_read(&perf->tsync) <= 0);
1107 if (ret == -ERESTARTSYS) {
1108 perf_terminate_test(perf);
1109 ret = -EINTR;
1110 }
1111
1112 clear_bit_unlock(0, &perf->busy_flag);
1113
1114 return ret;
1115}
1116
1117static int perf_read_stats(struct perf_ctx *perf, char *buf,
1118 size_t size, ssize_t *pos)
1119{
1120 struct perf_thread *pthr;
1121 int tidx;
1122
1123 if (test_and_set_bit_lock(0, &perf->busy_flag))
1124 return -EBUSY;
1125
1126 (*pos) += scnprintf(buf + *pos, size - *pos,
1127 " Peer %d test statistics:\n", perf->test_peer->pidx);
1128
1129 for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1130 pthr = &perf->threads[tidx];
1131
1132 if (pthr->status == -ENODATA)
1133 continue;
1134
1135 if (pthr->status) {
1136 (*pos) += scnprintf(buf + *pos, size - *pos,
1137 "%d: error status %d\n", tidx, pthr->status);
1138 continue;
1139 }
1140
1141 (*pos) += scnprintf(buf + *pos, size - *pos,
1142 "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
1143 tidx, pthr->copied, ktime_to_us(pthr->duration),
1144 div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
1145 }
1146
1147 clear_bit_unlock(0, &perf->busy_flag);
1148
1149 return 0;
1150}
1151
1152static void perf_init_threads(struct perf_ctx *perf)
1153{
1154 struct perf_thread *pthr;
1155 int tidx;
1156
1157 perf->tcnt = DEF_THREADS_CNT;
1158 perf->test_peer = &perf->peers[0];
1159 init_waitqueue_head(&perf->twait);
1160
1161 for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1162 pthr = &perf->threads[tidx];
1163
1164 pthr->perf = perf;
1165 pthr->tidx = tidx;
1166 pthr->status = -ENODATA;
1167 init_waitqueue_head(&pthr->dma_wait);
1168 INIT_WORK(&pthr->work, perf_thread_work);
1169 }
1170}
1171
1172static void perf_clear_threads(struct perf_ctx *perf)
1173{
1174 perf_terminate_test(perf);
1175}
1176
1177/*==============================================================================
1178 * DebugFS nodes
1179 *==============================================================================
1180 */
1181
1182static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
1183 size_t size, loff_t *offp)
1184{
1185 struct perf_ctx *perf = filep->private_data;
1186 struct perf_peer *peer;
1187 size_t buf_size;
1188 ssize_t pos = 0;
1189 int ret, pidx;
1190 char *buf;
1191
1192 buf_size = min_t(size_t, size, 0x1000U);
1193
1194 buf = kmalloc(buf_size, GFP_KERNEL);
1195 if (!buf)
1196 return -ENOMEM;
1197
1198 pos += scnprintf(buf + pos, buf_size - pos,
1199 " Performance measuring tool info:\n\n");
1200
1201 pos += scnprintf(buf + pos, buf_size - pos,
1202 "Local port %d, Global index %d\n", ntb_port_number(perf->ntb),
1203 perf->gidx);
1204 pos += scnprintf(buf + pos, buf_size - pos, "Test status: ");
1205 if (test_bit(0, &perf->busy_flag)) {
1206 pos += scnprintf(buf + pos, buf_size - pos,
1207 "on-fly with port %d (%d)\n",
1208 ntb_peer_port_number(perf->ntb, perf->test_peer->pidx),
1209 perf->test_peer->pidx);
1210 } else {
1211 pos += scnprintf(buf + pos, buf_size - pos, "idle\n");
1212 }
1213
1214 for (pidx = 0; pidx < perf->pcnt; pidx++) {
1215 peer = &perf->peers[pidx];
1216
1217 pos += scnprintf(buf + pos, buf_size - pos,
1218 "Port %d (%d), Global index %d:\n",
1219 ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
1220 peer->gidx);
1221
1222 pos += scnprintf(buf + pos, buf_size - pos,
1223 "\tLink status: %s\n",
1224 test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");
1225
1226 pos += scnprintf(buf + pos, buf_size - pos,
1227 "\tOut buffer addr 0x%pK\n", peer->outbuf);
1228
1229 pos += scnprintf(buf + pos, buf_size - pos,
1230 "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
1231
1232 pos += scnprintf(buf + pos, buf_size - pos,
1233 "\tOut buffer size %pa\n", &peer->outbuf_size);
1234
1235 pos += scnprintf(buf + pos, buf_size - pos,
1236 "\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);
1237
1238 if (!peer->inbuf) {
1239 pos += scnprintf(buf + pos, buf_size - pos,
1240 "\tIn buffer addr: unallocated\n");
1241 continue;
1242 }
1243
1244 pos += scnprintf(buf + pos, buf_size - pos,
1245 "\tIn buffer addr 0x%pK\n", peer->inbuf);
1246
1247 pos += scnprintf(buf + pos, buf_size - pos,
1248 "\tIn buffer size %pa\n", &peer->inbuf_size);
1249
1250 pos += scnprintf(buf + pos, buf_size - pos,
1251 "\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
1252 }
1253
1254 ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
1255 kfree(buf);
1256
1257 return ret;
1258}
1259
1260static const struct file_operations perf_dbgfs_info = {
1261 .open = simple_open,
1262 .read = perf_dbgfs_read_info
1263};
1264
1265static ssize_t perf_dbgfs_read_run(struct file *filep, char __user *ubuf,
1266 size_t size, loff_t *offp)
1267{
1268 struct perf_ctx *perf = filep->private_data;
1269 ssize_t ret, pos = 0;
1270 char *buf;
1271
1272 buf = kmalloc(PERF_BUF_LEN, GFP_KERNEL);
1273 if (!buf)
1274 return -ENOMEM;
1275
1276 ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos);
1277 if (ret)
1278 goto err_free;
1279
1280 ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
1281err_free:
1282 kfree(buf);
1283
1284 return ret;
1285}
1286
1287static ssize_t perf_dbgfs_write_run(struct file *filep, const char __user *ubuf,
1288 size_t size, loff_t *offp)
1289{
1290 struct perf_ctx *perf = filep->private_data;
1291 struct perf_peer *peer;
1292 int pidx, ret;
1293
1294 ret = kstrtoint_from_user(ubuf, size, 0, &pidx);
1295 if (ret)
1296 return ret;
1297
1298 if (pidx < 0 || pidx >= perf->pcnt)
1299 return -EINVAL;
1300
1301 peer = &perf->peers[pidx];
1302
1303 ret = perf_submit_test(peer);
1304 if (ret)
1305 return ret;
1306
1307 return size;
1308}
1309
1310static const struct file_operations perf_dbgfs_run = {
1311 .open = simple_open,
1312 .read = perf_dbgfs_read_run,
1313 .write = perf_dbgfs_write_run
1314};
1315
1316static ssize_t perf_dbgfs_read_tcnt(struct file *filep, char __user *ubuf,
1317 size_t size, loff_t *offp)
1318{
1319 struct perf_ctx *perf = filep->private_data;
1320 char buf[8];
1321 ssize_t pos;
1322
1323 pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt);
1324
1325 return simple_read_from_buffer(ubuf, size, offp, buf, pos);
1326}
1327
1328static ssize_t perf_dbgfs_write_tcnt(struct file *filep,
1329 const char __user *ubuf,
1330 size_t size, loff_t *offp)
1331{
1332 struct perf_ctx *perf = filep->private_data;
1333 int ret;
1334 u8 val;
1335
1336 ret = kstrtou8_from_user(ubuf, size, 0, &val);
1337 if (ret)
1338 return ret;
1339
1340 ret = perf_set_tcnt(perf, val);
1341 if (ret)
1342 return ret;
1343
1344 return size;
1345}
1346
1347static const struct file_operations perf_dbgfs_tcnt = {
1348 .open = simple_open,
1349 .read = perf_dbgfs_read_tcnt,
1350 .write = perf_dbgfs_write_tcnt
1351};
1352
1353static void perf_setup_dbgfs(struct perf_ctx *perf)
1354{
1355 struct pci_dev *pdev = perf->ntb->pdev;
1356
1357 perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
1358 if (!perf->dbgfs_dir) {
1359 dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
1360 return;
1361 }
1362
1363 debugfs_create_file("info", 0600, perf->dbgfs_dir, perf,
1364 &perf_dbgfs_info);
1365
1366 debugfs_create_file("run", 0600, perf->dbgfs_dir, perf,
1367 &perf_dbgfs_run);
1368
1369 debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf,
1370 &perf_dbgfs_tcnt);
1371
1372 /* These are made read-only for test execution safety and integrity */
1373 debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order);
1374
1375 debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order);
1376
1377 debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma);
1378}
1379
1380static void perf_clear_dbgfs(struct perf_ctx *perf)
1381{
1382 debugfs_remove_recursive(perf->dbgfs_dir);
1383}
1384
1385/*==============================================================================
1386 * Basic driver initialization
1387 *==============================================================================
1388 */
1389
1390static struct perf_ctx *perf_create_data(struct ntb_dev *ntb)
1391{
1392 struct perf_ctx *perf;
1393
1394 perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL);
1395 if (!perf)
1396 return ERR_PTR(-ENOMEM);
1397
1398 perf->pcnt = ntb_peer_port_count(ntb);
1399 perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers),
1400 GFP_KERNEL);
1401 if (!perf->peers)
1402 return ERR_PTR(-ENOMEM);
1403
1404 perf->ntb = ntb;
1405
1406 return perf;
1407}
1408
1409static int perf_setup_peer_mw(struct perf_peer *peer)
1410{
1411 struct perf_ctx *perf = peer->perf;
1412 phys_addr_t phys_addr;
1413 int ret;
1414
1415 /* Get outbound MW parameters and map it */
1416 ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
1417 &peer->outbuf_size);
1418 if (ret)
1419 return ret;
1420
1421 peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
1422 peer->outbuf_size);
1423 if (!peer->outbuf)
1424 return -ENOMEM;
1425
1426 peer->out_phys_addr = phys_addr;
1427
1428 if (max_mw_size && peer->outbuf_size > max_mw_size) {
1429 peer->outbuf_size = max_mw_size;
1430 dev_warn(&peer->perf->ntb->dev,
1431 "Peer %d outbuf reduced to %pa\n", peer->pidx,
1432 &peer->outbuf_size);
1433 }
1434
1435 return 0;
1436}
1437
1438static int perf_init_peers(struct perf_ctx *perf)
1439{
1440 struct perf_peer *peer;
1441 int pidx, lport, ret;
1442
1443 lport = ntb_port_number(perf->ntb);
1444 perf->gidx = -1;
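	/*
	 * Global index assignment (summary of the loop below): peers with a
	 * port number lower than the local one keep gidx == pidx, peers with a
	 * higher port number get gidx == pidx + 1, and the local device takes
	 * the first freed slot (or the last one if its port number is the
	 * highest), assuming peers are enumerated in port order.
	 */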
1445 for (pidx = 0; pidx < perf->pcnt; pidx++) {
1446 peer = &perf->peers[pidx];
1447
1448 peer->perf = perf;
1449 peer->pidx = pidx;
1450 if (lport < ntb_peer_port_number(perf->ntb, pidx)) {
1451 if (perf->gidx == -1)
1452 perf->gidx = pidx;
1453 peer->gidx = pidx + 1;
1454 } else {
1455 peer->gidx = pidx;
1456 }
1457 INIT_WORK(&peer->service, perf_service_work);
1458 }
1459 if (perf->gidx == -1)
1460 perf->gidx = pidx;
1461
1462 for (pidx = 0; pidx < perf->pcnt; pidx++) {
1463 ret = perf_setup_peer_mw(&perf->peers[pidx]);
1464 if (ret)
1465 return ret;
1466 }
1467
1468 dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx);
1469
1470 return 0;
1471}
1472
1473static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
1474{
1475 struct perf_ctx *perf;
1476 int ret;
1477
1478 perf = perf_create_data(ntb);
1479 if (IS_ERR(perf))
1480 return PTR_ERR(perf);
1481
1482 ret = perf_init_peers(perf);
1483 if (ret)
1484 return ret;
1485
1486 perf_init_threads(perf);
1487
1488 ret = perf_init_service(perf);
1489 if (ret)
1490 return ret;
1491
1492 ret = perf_enable_service(perf);
1493 if (ret)
1494 return ret;
1495
1496 perf_setup_dbgfs(perf);
1497
1498 return 0;
1499}
1500
1501static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
1502{
1503 struct perf_ctx *perf = ntb->ctx;
1504
1505 perf_clear_dbgfs(perf);
1506
1507 perf_disable_service(perf);
1508
1509 perf_clear_threads(perf);
1510}
1511
1512static struct ntb_client perf_client = {
1513 .ops = {
1514 .probe = perf_probe,
1515 .remove = perf_remove
1516 }
1517};
1518
1519static int __init perf_init(void)
1520{
1521 int ret;
1522
1523 if (chunk_order > MAX_CHUNK_ORDER) {
1524 chunk_order = MAX_CHUNK_ORDER;
1525 pr_info("Chunk order reduced to %hhu\n", chunk_order);
1526 }
1527
1528 if (total_order < chunk_order) {
1529 total_order = chunk_order;
1530 pr_info("Total data order reduced to %hhu\n", total_order);
1531 }
1532
1533 perf_wq = alloc_workqueue("perf_wq", WQ_UNBOUND | WQ_SYSFS, 0);
1534 if (!perf_wq)
1535 return -ENOMEM;
1536
1537 if (debugfs_initialized())
1538 perf_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1539
1540 ret = ntb_register_client(&perf_client);
1541 if (ret) {
1542 debugfs_remove_recursive(perf_dbgfs_topdir);
1543 destroy_workqueue(perf_wq);
1544 }
1545
1546 return ret;
1547}
1548module_init(perf_init);
1549
1550static void __exit perf_exit(void)
1551{
1552 ntb_unregister_client(&perf_client);
1553 debugfs_remove_recursive(perf_dbgfs_topdir);
1554 destroy_workqueue(perf_wq);
1555}
1556module_exit(perf_exit);
1557