/*
 * Greybus interface code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/delay.h>

#include "greybus.h"
#include "greybus_trace.h"

#define GB_INTERFACE_MODE_SWITCH_TIMEOUT	2000

#define GB_INTERFACE_DEVICE_ID_BAD	0xff

#define GB_INTERFACE_AUTOSUSPEND_MS	3000

/* Time required for interface to enter standby before disabling REFCLK */
#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS	20

/* Don't-care selector index */
#define DME_SELECTOR_INDEX_NULL		0

/* DME attributes */
/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
#define DME_T_TST_SRC_INCREMENT		0x4083

#define DME_DDBL1_MANUFACTURERID	0x5003
#define DME_DDBL1_PRODUCTID		0x5004

#define DME_TOSHIBA_GMP_VID		0x6000
#define DME_TOSHIBA_GMP_PID		0x6001
#define DME_TOSHIBA_GMP_SN0		0x6002
#define DME_TOSHIBA_GMP_SN1		0x6003
#define DME_TOSHIBA_GMP_INIT_STATUS	0x6101

/* DDBL1 Manufacturer and Product ids */
#define TOSHIBA_DMID			0x0126
#define TOSHIBA_ES2_BRIDGE_DPID		0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID	0x1001
#define TOSHIBA_ES3_GBPHY_DPID		0x1002

static int gb_interface_hibernate_link(struct gb_interface *intf);
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);

static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}

static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Unless this is a Toshiba bridge, bail out until we have defined
	 * standard GMP attributes.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}

static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

	/* DME attributes have already been read */
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}

static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* FIXME: Hard-coded AP device id. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}

static void gb_interface_route_destroy(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
		return;

	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}

/* Locking: Caller holds the interface mutex. */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}

void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}

int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and
 * clear it after reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges use T_TstSrcIncrement for the init status.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A nonzero init status indicates the module has finished
	 * initializing.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status.
	 *
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Check if the interface is executing the quirky ES3 bootrom that,
	 * for example, requires E2EFC, CSD and CSV to be disabled.
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
				GB_INTERFACE_QUIRK_FORCED_DISABLE |
				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 Loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}

/* interface sysfs attributes */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");

static ssize_t voltage_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_VOL,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(voltage_now);

static ssize_t current_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_CURR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(current_now);

static ssize_t power_now_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_PWR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(power_now);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	if (intf->active)
		return scnprintf(buf, PAGE_SIZE, "on\n");
	else
		return scnprintf(buf, PAGE_SIZE, "off\n");
}

static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);

static const char *gb_interface_type_string(struct gb_interface *intf)
{
	static const char * const types[] = {
		[GB_INTERFACE_TYPE_INVALID] = "invalid",
		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
	};

	return types[intf->type];
}

static ssize_t interface_type_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
}
static DEVICE_ATTR_RO(interface_type);

static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};

static umode_t interface_unipro_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_greybus_is_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_power_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static const struct attribute_group interface_unipro_group = {
	.is_visible	= interface_unipro_is_visible,
	.attrs		= interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible	= interface_greybus_is_visible,
	.attrs		= interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible	= interface_power_is_visible,
	.attrs		= interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs		= interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};

static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	kfree(intf);
}

#ifdef CONFIG_PM
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow interface to enter standby before disabling refclk */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	return ret;
}

static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	return 0;
}

static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name		= "greybus_interface",
	.release	= gb_interface_release,
	.pm		= &gb_interface_pm_ops,
};

/*
 * A Greybus module represents a user-replaceable component on a GMP
 * phone. An interface is the physical connection on that module. A
 * module may have more than one interface.
 *
 * Create a gb_interface structure to represent a discovered interface.
 * The position of the interface within the Endo is encoded in the
 * "interface_id" argument.
 *
 * Returns a pointer to the new interface or a null pointer if a
 * failure occurs due to memory exhaustion.
 */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;		/* XXX refcount? */
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}

static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}

static int gb_interface_hibernate_link(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
}

static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}

/*
 * At present, we assume a UniPro-only module to be a Greybus module that
 * failed to send its mailbox poke. There is some reason to believe that this
 * is because of a bug in the ES3 bootrom.
 *
 * FIXME: Check if this is a Toshiba bridge before retrying?
 */
static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
					   enum gb_interface_type *type)
{
	int retries = 3;
	int ret;

	while (retries--) {
		ret = _gb_interface_activate(intf, type);
		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
			continue;

		break;
	}

	return ret;
}

/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure type is detected correctly during reactivation. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		intf->type = type;
	}

	return ret;
}

/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}

/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its child
 * devices.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish control connection */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get manifest size using control protocol on CPort */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get manifest using control protocol on CPort */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up our data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	/* Register the control device and any bundles */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_destroy_bundles;

	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}

/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	pm_runtime_get_sync(&intf->dev);

	/* Set disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}

/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		/* fall-through */
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}

/* Deregister an interface. */
void gb_interface_del(struct gb_interface *intf)
{
	if (device_is_registered(&intf->dev)) {
		trace_gb_interface_del(intf);

		device_del(&intf->dev);
		dev_info(&intf->dev, "Interface removed\n");
	}
}

void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}