// SPDX-License-Identifier: GPL-2.0
/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

#define NSEC_PER_DAY 86400000000000ULL

struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
	struct list_head list_op_async;
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;

struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	ktime_t ts;
	int (*completion)(struct gb_loopback_async_operation *op_async);
};

struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct mutex mutex;
	struct task_struct *task;
	struct list_head entry;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	ktime_t ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};

static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);

/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN		1
#define GB_LOOPBACK_TIMEOUT_MAX		10000

#define GB_LOOPBACK_FIFO_DEFAULT	8192

static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX		1000000

/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)
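
/*
 * For illustration: each invocation of the macro above stamps out one
 * read-only sysfs attribute. gb_loopback_ro_attr(error), for example,
 * expands to roughly:
 *
 *	static ssize_t error_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		struct gb_loopback *gb = dev_get_drvdata(dev);
 *		return sprintf(buf, "%u\n", gb->error);
 *	}
 *	static DEVICE_ATTR_RO(error);
 *
 * DEVICE_ATTR_RO(error) in turn defines dev_attr_error, which is what
 * the loopback_attrs[] table further down references.
 */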

#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	/* Report 0 for min and max if no transfer succeeded */\
	if (!gb->requests_completed)				\
		return sprintf(buf, "0\n");			\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)

#define gb_loopback_ro_avg_attr(name)			\
static ssize_t name##_avg_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)			\
{							\
	struct gb_loopback_stats *stats;		\
	struct gb_loopback *gb;				\
	u64 avg, rem;					\
	u32 count;					\
	gb = dev_get_drvdata(dev);			\
	stats = &gb->name;				\
	count = stats->count ? stats->count : 1;	\
	avg = stats->sum + count / 2000000; /* round closest */ \
	rem = do_div(avg, count);			\
	rem *= 1000000;					\
	do_div(rem, count);				\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem); \
}							\
static DEVICE_ATTR_RO(name##_avg)

#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)

#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

#define gb_dev_loopback_ro_attr(field, conn)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

#define gb_dev_loopback_rw_attr(field, type)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%"#type"\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)
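
/*
 * For illustration: the *_avg attributes print a fixed-point average of
 * sum/count with six fractional digits. Assuming a latency stats struct
 * holding sum = 10 and count = 4 (values in microseconds), avg_show()
 * computes avg = 2, rem = 2 * 1000000 / 4 = 500000, and reports
 * "2.500000".
 */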

static void gb_loopback_reset_stats(struct gb_loopback *gb);
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}

/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in microseconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in microseconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *					     payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);
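
/*
 * A minimal usage sketch (assuming the first loopback device registered
 * as gb_loopback0): configure and start a 100-iteration transfer test
 * with a 512-byte payload, then read back the average latency in
 * microseconds.
 *
 *	echo 512 > /sys/class/gb_loopback/gb_loopback0/size
 *	echo 100 > /sys/class/gb_loopback/gb_loopback0/iteration_max
 *	echo 3   > /sys/class/gb_loopback/gb_loopback0/type
 *	cat /sys/class/gb_loopback/gb_loopback0/latency_avg
 *
 * Writing type wakes the worker thread below; writing 0 stops sending.
 */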

static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	do_div(elapsed_nsecs, NSEC_PER_USEC);
	return elapsed_nsecs;
}

static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
	return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}

static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	ktime_t ts, te;
	int ret;

	ts = ktime_get();
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	te = ktime_get();

	/* Calculate the total time the message took */
	gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}

static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}

static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	ktime_t te;
	int result;

	te = ktime_get();
	result = gb_operation_result(operation);
	op_async = gb_operation_get_data(operation);
	gb = op_async->gb;

	mutex_lock(&gb->mutex);

	if (!result && op_async->completion)
		result = op_async->completion(op_async);

	if (!result) {
		gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
	} else {
		gb->error++;
		if (result == -ETIMEDOUT)
			gb->requests_timedout++;
	}

	gb->iteration_count++;
	gb_loopback_calculate_stats(gb, result);

	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Wake up waiters */
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&gb->wq_completion);

	/* Release resources */
	gb_operation_put(operation);
	kfree(op_async);
}
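
/*
 * Async operation lifecycle, for reference: gb_loopback_async_operation()
 * below allocates an op_async wrapper, timestamps it, bumps
 * outstanding_operations and fires the request with the callback above.
 * The callback runs the optional per-type completion check, folds the
 * result into the stats under gb->mutex, then drops the operation
 * reference and wakes anyone throttled in gb_loopback_async_wait_to_send()
 * or draining in gb_loopback_async_wait_all().
 */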

static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	gb_operation_set_data(operation, op_async);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	op_async->ts = ktime_get();

	atomic_inc(&gb->outstanding_operations);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					jiffies_to_msecs(gb->jiffy_timeout),
					GFP_KERNEL);
	if (ret) {
		atomic_dec(&gb->outstanding_operations);
		gb_operation_put(operation);
		kfree(op_async);
	}
	return ret;
}

static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}

static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}
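
/*
 * For reference (the message structures live with the other Greybus
 * message definitions, not in this file): a transfer request carries a
 * little-endian length followed by the payload, which the sender fills
 * with 0x5A; the transfer response echoes the payload back and reports
 * two firmware-measured latencies in its reserved0/reserved1 fields,
 * which the helpers here fold into the apbridge_unipro_latency and
 * gbphy_firmware_latency stats.
 */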

static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}

static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_error;

gb_error:
	kfree(request);
	return retval;
}

static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}

static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}

static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	gb->ts = ktime_set(0, 0);
}

static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}
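
/*
 * For illustration: feeding the samples 10, 20 and 30 through
 * gb_loopback_update_stats() leaves min = 10, max = 30, sum = 60 and
 * count = 3, so the corresponding *_avg sysfs attribute reports
 * "20.000000".
 */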

static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}

static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}

static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}

static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	ktime_t te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	te = ktime_get();
	nlat = gb_loopback_calc_latency(gb->ts, te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}

static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				  kthread_should_stop());
}
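
/*
 * For illustration: the window updates above turn a measurement window
 * into rates. With 1000 requests completed over a 500000 us window,
 * gb_loopback_requests_update() feeds req = 1000 * USEC_PER_SEC into the
 * window helper, which divides by the window length and records
 * 2000 requests per second for that window.
 */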

static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			mutex_unlock(&gb->mutex);

			/* Wait for synchronous and asynchronous completion */
			gb_loopback_async_wait_all(gb);

			/* Mark complete unless user-space has poked us */
			mutex_lock(&gb->mutex);
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj,  NULL,
					     "iteration_count");
				dev_dbg(&bundle->dev, "load test complete\n");
			} else {
				dev_dbg(&bundle->dev,
					"continuing on with new test set\n");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (ktime_to_ns(gb->ts) == 0)
			gb->ts = ktime_get();

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_async_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_async_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_async_sink(gb, size);

			if (error) {
				gb->error++;
				gb->iteration_count++;
			}
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		mutex_unlock(&gb->mutex);

		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}

static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}

static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}

static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
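
/*
 * A usage sketch for the per-connection debugfs file created in
 * gb_loopback_probe() below (the path assumes debugfs is mounted in the
 * usual place and uses an example bundle name):
 *
 *	cat /sys/kernel/debug/gb_loopback/raw_latency_1-2.2
 *
 * Each read pops one raw per-iteration latency value, in microseconds,
 * from kfifo_lat; reading an empty fifo returns -EAGAIN.
 */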

static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
				      struct list_head *lhb)
{
	struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
	struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
	struct gb_connection *ca = a->connection;
	struct gb_connection *cb = b->connection;

	if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
		return -1;
	if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
		return 1;
	if (ca->bundle->id < cb->bundle->id)
		return -1;
	if (cb->bundle->id < ca->bundle->id)
		return 1;
	if (ca->intf_cport_id < cb->intf_cport_id)
		return -1;
	else if (cb->intf_cport_id < ca->intf_cport_id)
		return 1;

	return 0;
}

static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
}

#define DEBUGFS_NAMELEN 32
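
/*
 * For reference: the comparator above orders entries first by interface
 * id, then bundle id, then cport id, so gb_dev.list always reflects the
 * bus topology regardless of the order in which connections probe.
 */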

static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}

static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};

static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	INIT_LIST_HEAD(&gb_dev.list_op_async);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");
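
/*
 * A load-time sketch (module name assumed from the staging build): the
 * only module parameter is the depth of the per-connection raw-latency
 * fifo, e.g.
 *
 *	modprobe gb-loopback kfifo_depth=16384
 *
 * kfifo_depth is read-only at runtime (mode 0444) and is consumed in
 * gb_loopback_probe() when sizing kfifo_lat (kfifo_depth * sizeof(u32)
 * bytes).
 */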