/*
 * Greybus CPort control protocol.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "greybus.h"

/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR        0
#define GB_CONTROL_VERSION_MINOR        1

/*
 * Negotiate the control-protocol version with the interface and cache
 * the agreed version in @control->protocol_major/minor.
 *
 * Returns 0 on success, -ENOTSUPP if the interface reports a major
 * version greater than ours, or a negative errno from the operation.
 */
static int gb_control_get_version(struct gb_control *control)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_control_version_request request;
        struct gb_control_version_response response;
        int ret;

        request.major = GB_CONTROL_VERSION_MAJOR;
        request.minor = GB_CONTROL_VERSION_MINOR;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_VERSION,
                                &request, sizeof(request), &response,
                                sizeof(response));
        if (ret) {
                dev_err(&intf->dev,
                        "failed to get control-protocol version: %d\n",
                        ret);
                return ret;
        }

        if (response.major > request.major) {
                dev_err(&intf->dev,
                        "unsupported major control-protocol version (%u > %u)\n",
                        response.major, request.major);
                return -ENOTSUPP;
        }

        control->protocol_major = response.major;
        control->protocol_minor = response.minor;

        dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
                response.minor);

        return 0;
}

/*
 * Query the class version of a single bundle and cache it in
 * @bundle->class_major/minor.  Returns 0 or a negative errno.
 */
static int gb_control_get_bundle_version(struct gb_control *control,
                                         struct gb_bundle *bundle)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_control_bundle_version_request request;
        struct gb_control_bundle_version_response response;
        int ret;

        request.bundle_id = bundle->id;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_VERSION,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&intf->dev,
                        "failed to get bundle %u class version: %d\n",
                        bundle->id, ret);
                return ret;
        }

        bundle->class_major = response.major;
        bundle->class_minor = response.minor;

        dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
                response.major, response.minor);

        return 0;
}

/*
 * Fetch the class version of every bundle on the interface.  A no-op
 * (returning 0) when the control protocol in use predates the
 * bundle-version operation (see has_bundle_version in
 * gb_control_enable()).  Stops at the first failing bundle.
 */
int gb_control_get_bundle_versions(struct gb_control *control)
{
        struct gb_interface *intf = control->connection->intf;
        struct gb_bundle *bundle;
        int ret;

        if (!control->has_bundle_version)
                return 0;

        list_for_each_entry(bundle, &intf->bundles, links) {
                ret = gb_control_get_bundle_version(control, bundle);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * Get Manifest's size from the interface.
 *
 * Returns the manifest size in bytes (non-negative) on success, or a
 * negative errno.
 */
int gb_control_get_manifest_size_operation(struct gb_interface *intf)
{
        struct gb_control_get_manifest_size_response response;
        struct gb_connection *connection = intf->control->connection;
        int ret;

        ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
                                NULL, 0, &response, sizeof(response));
        if (ret) {
                dev_err(&connection->intf->dev,
                        "failed to get manifest size: %d\n", ret);
                return ret;
        }

        return le16_to_cpu(response.size);
}

/*
 * Reads Manifest from the interface into @manifest, which must be at
 * least @size bytes (typically the value returned by
 * gb_control_get_manifest_size_operation()).
 */
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
                                      size_t size)
{
        struct gb_connection *connection = intf->control->connection;

        return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
                                 NULL, 0, manifest, size);
}

/* Notify the interface that @cport_id is now connected. */
int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
{
        struct gb_control_connected_request request;

        request.cport_id = cpu_to_le16(cport_id);
        return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
                                 &request, sizeof(request), NULL, 0);
}

/* Notify the interface that @cport_id has been disconnected. */
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
{
        struct gb_control_disconnected_request request;

        request.cport_id = cpu_to_le16(cport_id);
        return gb_operation_sync(control->connection,
                                 GB_CONTROL_TYPE_DISCONNECTED, &request,
                                 sizeof(request), NULL, 0);
}

/*
 * Notify the interface that @cport_id is about to be disconnected.
 *
 * NOTE(review): built with gb_operation_create_core() rather than the
 * plain gb_operation_sync() helper used by the other requests here —
 * presumably so it can be sent during connection teardown; confirm
 * against the operation-core API before changing.
 */
int gb_control_disconnecting_operation(struct gb_control *control,
                                       u16 cport_id)
{
        struct gb_control_disconnecting_request *request;
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(control->connection,
                                             GB_CONTROL_TYPE_DISCONNECTING,
                                             sizeof(*request), 0, 0,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        request = operation->request->payload;
        request->cport_id = cpu_to_le16(cport_id);

        ret = gb_operation_request_send_sync(operation);
        if (ret) {
                dev_err(&control->dev, "failed to send disconnecting: %d\n",
                        ret);
        }

        gb_operation_put(operation);

        return ret;
}

/*
 * Tell the interface to switch modes.  The operation is created with
 * GB_OPERATION_FLAG_UNIDIRECTIONAL: no response is expected from the
 * interface.
 */
int gb_control_mode_switch_operation(struct gb_control *control)
{
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(control->connection,
                                             GB_CONTROL_TYPE_MODE_SWITCH,
                                             0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send_sync(operation);
        if (ret)
                dev_err(&control->dev, "failed to send mode switch: %d\n", ret);

        gb_operation_put(operation);

        return ret;
}

/*
 * Enable frame-time synchronisation on the interface.  All multi-byte
 * fields are converted to little-endian wire order.
 */
int gb_control_timesync_enable(struct gb_control *control, u8 count,
                               u64 frame_time, u32 strobe_delay, u32 refclk)
{
        struct gb_control_timesync_enable_request request;

        request.count = count;
        request.frame_time = cpu_to_le64(frame_time);
        request.strobe_delay = cpu_to_le32(strobe_delay);
        request.refclk = cpu_to_le32(refclk);
        return gb_operation_sync(control->connection,
                                 GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
                                 sizeof(request), NULL, 0);
}

/* Disable frame-time synchronisation on the interface. */
int gb_control_timesync_disable(struct gb_control *control)
{
        return gb_operation_sync(control->connection,
                                 GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
                                 NULL, 0);
}

/*
 * Read back the frame-time the interface latched at its last timesync
 * event.  On success *@frame_time is set; on failure it is untouched.
 */
int gb_control_timesync_get_last_event(struct gb_control *control,
                                       u64 *frame_time)
{
        struct gb_control_timesync_get_last_event_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
                                NULL, 0, &response, sizeof(response));
        if (!ret)
                *frame_time = le64_to_cpu(response.frame_time);
        return ret;
}

/*
 * Send the authoritative frame-time for each strobe to the interface.
 * @frame_time must point to GB_TIMESYNC_MAX_STROBES entries.
 */
int gb_control_timesync_authoritative(struct gb_control *control,
                                      u64 *frame_time)
{
        struct gb_control_timesync_authoritative_request request;
        int i;

        for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
                request.frame_time[i] = cpu_to_le64(frame_time[i]);

        return gb_operation_sync(control->connection,
                                 GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
                                 &request, sizeof(request),
                                 NULL, 0);
}

/*
 * Map a bundle power-management status byte from the wire to a
 * negative errno.  Unknown statuses map to -EREMOTEIO.
 */
static int gb_control_bundle_pm_status_map(u8 status)
{
        switch (status) {
        case GB_CONTROL_BUNDLE_PM_INVAL:
                return -EINVAL;
        case GB_CONTROL_BUNDLE_PM_BUSY:
                return -EBUSY;
        case GB_CONTROL_BUNDLE_PM_NA:
                return -ENOMSG;
        case GB_CONTROL_BUNDLE_PM_FAIL:
        default:
                return -EREMOTEIO;
        }
}

/*
 * Ask the interface to suspend bundle @bundle_id.  Returns 0 on
 * success, a negative errno if the request could not be sent, or the
 * mapped errno if the interface reported a non-OK status.
 */
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
                        bundle_id, ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

/* Ask the interface to resume bundle @bundle_id (see bundle_suspend). */
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
                        bundle_id, ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to resume bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

/* Ask the interface to deactivate bundle @bundle_id (see bundle_suspend). */
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send bundle %u deactivate: %d\n", bundle_id,
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

/*
 * Ask the interface to activate bundle @bundle_id.  A no-op (returning
 * 0) when the interface does not support the operation (see the
 * has_bundle_activate quirk handling in gb_control_enable()).
 */
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
{
        struct gb_control_bundle_pm_request request;
        struct gb_control_bundle_pm_response response;
        int ret;

        if (!control->has_bundle_activate)
                return 0;

        request.bundle_id = bundle_id;
        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
                                sizeof(request), &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send bundle %u activate: %d\n", bundle_id,
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
                dev_err(&control->dev, "failed to activate bundle %u: %d\n",
                        bundle_id, response.status);
                return gb_control_bundle_pm_status_map(response.status);
        }

        return 0;
}

/*
 * Map an interface power-management status byte from the wire to a
 * negative errno.  Unknown statuses map to -EREMOTEIO.
 */
static int gb_control_interface_pm_status_map(u8 status)
{
        switch (status) {
        case GB_CONTROL_INTF_PM_BUSY:
                return -EBUSY;
        case GB_CONTROL_INTF_PM_NA:
                return -ENOMSG;
        default:
                return -EREMOTEIO;
        }
}

/*
 * Tell the interface to prepare for suspend.  Returns 0 on success, a
 * negative errno on send failure, or the mapped errno if the interface
 * reported a non-OK status.
 */
int gb_control_interface_suspend_prepare(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
                                &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send interface suspend prepare: %d\n", ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while preparing suspend: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

/* Tell the interface to prepare for deactivation (see suspend_prepare). */
int gb_control_interface_deactivate_prepare(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
                                0, &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

/* Tell the interface to abort a pending hibernation. */
int gb_control_interface_hibernate_abort(struct gb_control *control)
{
        struct gb_control_intf_pm_response response;
        int ret;

        ret = gb_operation_sync(control->connection,
                                GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
                                &response, sizeof(response));
        if (ret) {
                dev_err(&control->dev,
                        "failed to send interface aborting hibernate: %d\n",
                        ret);
                return ret;
        }

        if (response.status != GB_CONTROL_INTF_PM_OK) {
                dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
                        response.status);
                return gb_control_interface_pm_status_map(response.status);
        }

        return 0;
}

/* sysfs: expose the interface vendor string (read-only). */
static ssize_t vendor_string_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct gb_control *control = to_gb_control(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);

/* sysfs: expose the interface product string (read-only). */
static ssize_t product_string_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct gb_control *control = to_gb_control(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);

static struct attribute *control_attrs[] = {
        &dev_attr_vendor_string.attr,
        &dev_attr_product_string.attr,
        NULL,
};
ATTRIBUTE_GROUPS(control);

/*
 * Device release callback: runs when the last reference to the control
 * device is dropped.  Destroys the control connection and frees the
 * strings and the control structure itself.
 */
static void gb_control_release(struct device *dev)
{
        struct gb_control *control = to_gb_control(dev);

        gb_connection_destroy(control->connection);

        kfree(control->vendor_string);
        kfree(control->product_string);

        kfree(control);
}

struct device_type greybus_control_type = {
        .name = "greybus_control",
        .release = gb_control_release,
};

/*
 * Allocate and initialise a control structure (and its connection) for
 * @intf.  The embedded device is initialised but not yet registered;
 * use gb_control_add() for that, and gb_control_put() to release.
 * Returns the control structure or an ERR_PTR() on failure.
 */
struct gb_control *gb_control_create(struct gb_interface *intf)
{
        struct gb_connection *connection;
        struct gb_control *control;

        control = kzalloc(sizeof(*control), GFP_KERNEL);
        if (!control)
                return ERR_PTR(-ENOMEM);

        control->intf = intf;

        connection = gb_connection_create_control(intf);
        if (IS_ERR(connection)) {
                dev_err(&intf->dev,
                        "failed to create control connection: %ld\n",
                        PTR_ERR(connection));
                kfree(control);
                return ERR_CAST(connection);
        }

        control->connection = connection;

        control->dev.parent = &intf->dev;
        control->dev.bus = &greybus_bus_type;
        control->dev.type = &greybus_control_type;
        control->dev.groups = control_groups;
        control->dev.dma_mask = intf->dev.dma_mask;
        device_initialize(&control->dev);
        dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));

        gb_connection_set_data(control->connection, control);

        return control;
}

/*
 * Enable the control connection (tx only) and negotiate the protocol
 * version, then derive feature flags: bundle-version is available from
 * protocol version 0.2, and bundle-activate unless the interface has
 * the NO_BUNDLE_ACTIVATE quirk.
 */
int gb_control_enable(struct gb_control *control)
{
        int ret;

        dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

        ret = gb_connection_enable_tx(control->connection);
        if (ret) {
                dev_err(&control->connection->intf->dev,
                        "failed to enable control connection: %d\n",
                        ret);
                return ret;
        }

        ret = gb_control_get_version(control);
        if (ret)
                goto err_disable_connection;

        if (control->protocol_major > 0 || control->protocol_minor > 1)
                control->has_bundle_version = true;

        /* FIXME: use protocol version instead */
        if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
                control->has_bundle_activate = true;

        return 0;

err_disable_connection:
        gb_connection_disable(control->connection);

        return ret;
}

/*
 * Disable the control connection.  A forced disable is used when the
 * interface has already been disconnected.
 */
void gb_control_disable(struct gb_control *control)
{
        dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

        if (control->intf->disconnected)
                gb_connection_disable_forced(control->connection);
        else
                gb_connection_disable(control->connection);
}

/* Quiesce the control connection for interface suspend. */
int gb_control_suspend(struct gb_control *control)
{
        gb_connection_disable(control->connection);

        return 0;
}

/* Re-enable the control connection (tx only) after resume. */
int gb_control_resume(struct gb_control *control)
{
        int ret;

        ret = gb_connection_enable_tx(control->connection);
        if (ret) {
                dev_err(&control->connection->intf->dev,
                        "failed to enable control connection: %d\n", ret);
                return ret;
        }

        return 0;
}

/* Register the control device with the driver core. */
int gb_control_add(struct gb_control *control)
{
        int ret;

        ret = device_add(&control->dev);
        if (ret) {
                dev_err(&control->dev,
                        "failed to register control device: %d\n",
                        ret);
                return ret;
        }

        return 0;
}

/* Unregister the control device, if it was ever registered. */
void gb_control_del(struct gb_control *control)
{
        if (device_is_registered(&control->dev))
                device_del(&control->dev);
}

/* Take a reference on the control device; pair with gb_control_put(). */
struct gb_control *gb_control_get(struct gb_control *control)
{
        get_device(&control->dev);

        return control;
}

/* Drop a reference; may trigger gb_control_release(). */
void gb_control_put(struct gb_control *control)
{
        put_device(&control->dev);
}

/* Forward mode-switch preparation to the control connection. */
void gb_control_mode_switch_prepare(struct gb_control *control)
{
        gb_connection_mode_switch_prepare(control->connection);
}

/* Forward mode-switch completion to the control connection. */
void gb_control_mode_switch_complete(struct gb_control *control)
{
        gb_connection_mode_switch_complete(control->connection);
}