/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

#define NSEC_PER_DAY 86400000000000ULL

struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
	struct list_head list_op_async;
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;

struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct timeval ts;
	struct timer_list timer;
	struct list_head entry;
	struct work_struct work;
	struct kref kref;
	bool pending;
	int (*completion)(struct gb_loopback_async_operation *op_async);
};

struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct kfifo kfifo_ts;
	struct mutex mutex;
	struct task_struct *task;
	struct list_head entry;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	struct timeval ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u32 lbid;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};

static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);

/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN		1
#define GB_LOOPBACK_TIMEOUT_MAX		10000

#define GB_LOOPBACK_FIFO_DEFAULT	8192

static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX		1000000

/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)
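/*
 * For illustration, gb_loopback_ro_attr(error) expands (modulo whitespace)
 * to:
 *
 *	static ssize_t error_show(struct device *dev,
 *				  struct device_attribute *attr, char *buf)
 *	{
 *		struct gb_loopback *gb = dev_get_drvdata(dev);
 *		return sprintf(buf, "%u\n", gb->error);
 *	}
 *	static DEVICE_ATTR_RO(error);
 *
 * i.e. one read-only sysfs attribute backed by the named gb_loopback field.
 */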
#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	/* Report 0 for min and max if no transfer succeeded */\
	if (!gb->requests_completed)				\
		return sprintf(buf, "0\n");			\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)

#define gb_loopback_ro_avg_attr(name)				\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback_stats *stats;			\
	struct gb_loopback *gb;					\
	u64 avg, rem;						\
	u32 count;						\
	gb = dev_get_drvdata(dev);				\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;		\
	avg = stats->sum + count / 2000000; /* round closest */	\
	rem = do_div(avg, count);				\
	rem *= 1000000;						\
	do_div(rem, count);					\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);	\
}								\
static DEVICE_ATTR_RO(name##_avg)

#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)
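/*
 * Worked example for gb_loopback_ro_avg_attr() above: the latency sums are
 * kept in microseconds, so for sum = 1000 and count = 3 the division leaves
 * avg = 333 with rem = 1, then rem * 1000000 / count = 333333, and the
 * attribute prints "333.333333" (microseconds, six decimal places).
 */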
#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

#define gb_dev_loopback_ro_attr(field, conn)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);				\
}									\
static DEVICE_ATTR_RO(field)

#define gb_dev_loopback_rw_attr(field, type)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

static void gb_loopback_reset_stats(struct gb_loopback *gb);
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);
	kfifo_reset_out(&gb->kfifo_ts);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}

/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *	payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);
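/*
 * Illustrative user-space sequence driving the attributes above (the
 * device index is an example; writing "type" is what starts the test,
 * so it goes last):
 *
 *	echo 128 > /sys/class/gb_loopback/gb_loopback0/size
 *	echo 10000 > /sys/class/gb_loopback/gb_loopback0/iteration_max
 *	echo 3 > /sys/class/gb_loopback/gb_loopback0/type
 *	cat /sys/class/gb_loopback/gb_loopback0/latency_avg
 */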
static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	u32 lat;

	do_div(elapsed_nsecs, NSEC_PER_USEC);
	lat = elapsed_nsecs;
	return lat;
}

static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
{
	u64 t1, t2;

	t1 = timeval_to_ns(ts);
	t2 = timeval_to_ns(te);

	return __gb_loopback_calc_latency(t1, t2);
}

static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
					struct timeval *ts, struct timeval *te)
{
	kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
	kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
}
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	struct timeval ts, te;
	int ret;

	do_gettimeofday(&ts);
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	do_gettimeofday(&te);

	/* Calculate the total time the message took */
	gb_loopback_push_latency_ts(gb, &ts, &te);
	gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}

static void __gb_loopback_async_operation_destroy(struct kref *kref)
{
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(kref, struct gb_loopback_async_operation, kref);

	list_del(&op_async->entry);
	if (op_async->operation)
		gb_operation_put(op_async->operation);
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&op_async->gb->wq_completion);
	kfree(op_async);
}

static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
					    *op_async)
{
	kref_get(&op_async->kref);
}

static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
					    *op_async)
{
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
	spin_unlock_irqrestore(&gb_dev.lock, flags);
}

static struct gb_loopback_async_operation *
	gb_loopback_operation_find(u16 id)
{
	struct gb_loopback_async_operation *op_async;
	bool found = false;
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
		if (op_async->operation->id == id) {
			gb_loopback_async_operation_get(op_async);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	return found ? op_async : NULL;
}

static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}

static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	struct timeval te;
	bool err = false;

	do_gettimeofday(&te);
	op_async = gb_loopback_operation_find(operation->id);
	if (!op_async)
		return;

	gb = op_async->gb;
	mutex_lock(&gb->mutex);

	if (!op_async->pending || gb_operation_result(operation)) {
		err = true;
	} else {
		if (op_async->completion)
			if (op_async->completion(op_async))
				err = true;
	}

	if (!err) {
		gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
		gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
							     &te);
	}

	if (op_async->pending) {
		if (err)
			gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		del_timer_sync(&op_async->timer);
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, err);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	gb_loopback_async_operation_put(op_async);
}

static void gb_loopback_async_operation_work(struct work_struct *work)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(work, struct gb_loopback_async_operation, work);
	gb = op_async->gb;
	operation = op_async->operation;

	mutex_lock(&gb->mutex);
	if (op_async->pending) {
		gb->requests_timedout++;
		gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, true);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
		operation->id);

	gb_operation_cancel(operation, -ETIMEDOUT);
	gb_loopback_async_operation_put(op_async);
}

static void gb_loopback_async_operation_timeout(unsigned long data)
{
	struct gb_loopback_async_operation *op_async;
	u16 id = data;

	op_async = gb_loopback_operation_find(id);
	if (!op_async) {
		pr_err("operation %d not found - time out ?\n", id);
		return;
	}
	schedule_work(&op_async->work);
}
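/*
 * Sketch of the asynchronous operation lifecycle implemented by the helpers
 * above and gb_loopback_async_operation() below:
 *
 *   1. allocate op_async, link it on gb_dev.list_op_async, send the request
 *      and arm a one-shot timer (gb->jiffy_timeout);
 *   2. on response, gb_loopback_async_operation_callback() looks the op up
 *      by operation id, records latency and accounting, deletes the timer
 *      and drops the pending reference;
 *   3. if the timer fires first, the timeout work accounts a timed-out
 *      request and cancels the operation with -ETIMEDOUT; the cancel in
 *      turn completes the operation, so the callback runs, sees
 *      pending == false and only drops its lookup reference.
 */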
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;
	unsigned long flags;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
	kref_init(&op_async->kref);

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	spin_lock_irqsave(&gb_dev.lock, flags);
	list_add_tail(&op_async->entry, &gb_dev.list_op_async);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	do_gettimeofday(&op_async->ts);
	op_async->pending = true;
	atomic_inc(&gb->outstanding_operations);
	mutex_lock(&gb->mutex);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					GFP_KERNEL);
	if (ret)
		goto error;

	setup_timer(&op_async->timer, gb_loopback_async_operation_timeout,
		    (unsigned long)operation->id);
	op_async->timer.expires = jiffies + gb->jiffy_timeout;
	add_timer(&op_async->timer);

	goto done;
error:
	gb_loopback_async_operation_put(op_async);
done:
	mutex_unlock(&gb->mutex);
	return ret;
}

static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}
static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}

static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}

static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_error;

gb_error:
	kfree(request);
	return retval;
}

static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}

static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}

static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	memset(&gb->ts, 0, sizeof(struct timeval));
}
static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}

static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}

static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}

static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}
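/*
 * Worked example for gb_loopback_throughput_update() above: a TRANSFER
 * iteration carries two operation headers plus the request and response
 * structures and the payload in each direction, so for size = 128
 *
 *	aggregate_size = 2 * sizeof(struct gb_operation_msg_hdr) +
 *			 sizeof(request) + sizeof(response) + 2 * 128;
 *
 * Scaling by requests_completed and USEC_PER_SEC and dividing by the
 * window latency (in microseconds) yields throughput in bytes/sec.
 */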
static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	struct timeval te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	do_gettimeofday(&te);
	nlat = gb_loopback_calc_latency(&gb->ts, &te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}

static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				 kthread_should_stop());
}

static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
					     "iteration_count");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
			do_gettimeofday(&gb->ts);
		mutex_unlock(&gb->mutex);

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING) {
				error = gb_loopback_async_ping(gb);
			} else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
				error = gb_loopback_async_transfer(gb, size);
			} else if (type == GB_LOOPBACK_TYPE_SINK) {
				error = gb_loopback_async_sink(gb, size);
			}

			if (error)
				gb->error++;
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		if (us_wait)
			udelay(us_wait);
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}

static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}

static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}

static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
				      struct list_head *lhb)
{
	struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
	struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
	struct gb_connection *ca = a->connection;
	struct gb_connection *cb = b->connection;

	if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
		return -1;
	if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
		return 1;
	if (ca->bundle->id < cb->bundle->id)
		return -1;
	if (cb->bundle->id < ca->bundle->id)
		return 1;
	if (ca->intf_cport_id < cb->intf_cport_id)
		return -1;
	else if (cb->intf_cport_id < ca->intf_cport_id)
		return 1;

	return 0;
}

static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}
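/*
 * Example of the resulting IDs: with three loopback connections sorted by
 * (interface id, bundle id, cport id), the list entries are assigned
 * lbid = 0x1, 0x2 and 0x4 respectively, i.e. one bit per device.
 */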
#define DEBUGFS_NAMELEN 32

static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_kfifo0;
	}

	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo1;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo1:
	kfifo_free(&gb->kfifo_ts);
out_kfifo0:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}
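/*
 * Example of sampling the debugfs node created in probe above (the bundle
 * name and the usual debugfs mount point are illustrative):
 *
 *	cat /sys/kernel/debug/gb_loopback/raw_latency_<bundle>
 *
 * Each open/read cycle pops one latency value, in microseconds, from
 * kfifo_lat, or fails with -EAGAIN when the fifo is empty.
 */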
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	kfifo_free(&gb->kfifo_ts);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};

static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	INIT_LIST_HEAD(&gb_dev.list_op_async);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");