// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Bluetooth HCI UART driver for Intel devices
 *
 *  Copyright (C) 2015  Intel Corporation
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/tty.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btintel.h"

/* Bit numbers in intel_data.flags tracking device/driver state. They are
 * manipulated with set_bit/clear_bit and some (STATE_BOOTING,
 * STATE_DOWNLOADING, STATE_LPM_TRANSACTION) double as wait_on_bit wait
 * channels.
 */
#define STATE_BOOTLOADER	0
#define STATE_DOWNLOADING	1
#define STATE_FIRMWARE_LOADED	2
#define STATE_FIRMWARE_FAILED	3
#define STATE_BOOTING		4
#define STATE_LPM_ENABLED	5
#define STATE_TX_ACTIVE		6
#define STATE_SUSPENDED		7
#define STATE_LPM_TRANSACTION	8

/* Vendor-specific H:4 packet types used by the LPM (low power) protocol. */
#define HCI_LPM_WAKE_PKT 0xf0
#define HCI_LPM_PKT 0xf1
#define HCI_LPM_MAX_SIZE 10
#define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE

/* Opcodes carried in struct hci_lpm_pkt */
#define LPM_OP_TX_NOTIFY 0x00
#define LPM_OP_SUSPEND_ACK 0x02
#define LPM_OP_RESUME_ACK 0x03

/* Runtime-PM autosuspend delay after the last TX activity. */
#define LPM_SUSPEND_DELAY_MS 1000

/* Wire format of an LPM packet: opcode, payload length, payload bytes. */
struct hci_lpm_pkt {
	__u8 opcode;
	__u8 dlen;
	__u8 data[];
} __packed;

/* Per platform-device (ACPI-enumerated) state. 'hu' is the hci_uart bound
 * to this device while it is powered; it is protected by hu_lock and set
 * to NULL on power-off (see intel_set_power()).
 */
struct intel_device {
	struct list_head list;
	struct platform_device *pdev;
	struct gpio_desc *reset;
	struct hci_uart *hu;
	struct mutex hu_lock;
	int irq;
};

/* Global list of probed intel_device instances, matched against a hci_uart
 * by comparing the tty's parent device with the platform device's parent.
 */
static LIST_HEAD(intel_device_list);
static DEFINE_MUTEX(intel_device_list_lock);

/* Per hci_uart protocol instance data (stored in hu->priv). */
struct intel_data {
	struct sk_buff *rx_skb;		/* partial RX frame being reassembled */
	struct sk_buff_head txq;	/* packets queued for transmission */
	struct work_struct busy_work;	/* defers autosuspend while TX active */
	struct hci_uart *hu;
	unsigned long flags;		/* STATE_* bits above */
};

/* Map a UART baud rate to the Intel vendor speed code used by the 0xfc06
 * speed-change command; returns 0xff for unsupported rates.
 */
static u8 intel_convert_speed(unsigned int speed)
{
	switch (speed) {
	case 9600:
		return 0x00;
	case 19200:
		return 0x01;
	case 38400:
		return 0x02;
	case 57600:
		return 0x03;
	case 115200:
		return 0x04;
	case 230400:
		return 0x05;
	case 460800:
		return 0x06;
	case 921600:
		return 0x07;
	case 1843200:
		return 0x08;
	case 3250000:
		return 0x09;
	case 2000000:
		return 0x0a;
	case 3000000:
		return 0x0b;
	default:
		return 0xff;
	}
}

/* Wait up to 1 second for the STATE_BOOTING bit to be cleared by the bootup
 * event (see intel_recv_event()). Returns 0, -EINTR or -ETIMEDOUT.
 */
static int intel_wait_booting(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	int err;

	err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
				  TASK_INTERRUPTIBLE,
				  msecs_to_jiffies(1000));

	if (err == -EINTR) {
		bt_dev_err(hu->hdev, "Device boot interrupted");
		return -EINTR;
	}

	if (err) {
		bt_dev_err(hu->hdev, "Device boot timeout");
		return -ETIMEDOUT;
	}

	return err;
}

/* Wait up to 1 second for an in-flight LPM suspend/resume transaction to be
 * acknowledged (bit cleared in intel_recv_lpm()). Returns 0, -EINTR or
 * -ETIMEDOUT.
 */
static int intel_wait_lpm_transaction(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	int err;

	err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION,
				  TASK_INTERRUPTIBLE,
				  msecs_to_jiffies(1000));

	if (err == -EINTR) {
		bt_dev_err(hu->hdev, "LPM transaction interrupted");
		return -EINTR;
	}

	if (err) {
		bt_dev_err(hu->hdev, "LPM transaction timeout");
		return -ETIMEDOUT;
	}

	return err;
}

/* Send the LPM suspend request and wait for the controller's SUSPEND_ACK.
 * No-op if LPM is disabled or already suspended; refuses (-EAGAIN) while TX
 * is still active. On success hardware flow control is asserted to stall
 * the UART. Called from runtime/system PM callbacks via intel_suspend_device().
 */
static int intel_lpm_suspend(struct hci_uart *hu)
{
	static const u8 suspend[] = { 0x01, 0x01, 0x01 };
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
	    test_bit(STATE_SUSPENDED, &intel->flags))
		return 0;

	if (test_bit(STATE_TX_ACTIVE, &intel->flags))
		return -EAGAIN;

	bt_dev_dbg(hu->hdev, "Suspending");

	skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
		return -ENOMEM;
	}

	skb_put_data(skb, suspend, sizeof(suspend));
	hci_skb_pkt_type(skb) = HCI_LPM_PKT;

	set_bit(STATE_LPM_TRANSACTION, &intel->flags);

	/* LPM flow is a priority, enqueue packet at list head */
	skb_queue_head(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	intel_wait_lpm_transaction(hu);
	/* Even in case of failure, continue and test the suspended flag */

	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);

	if (!test_bit(STATE_SUSPENDED, &intel->flags)) {
		bt_dev_err(hu->hdev, "Device suspend error");
		return -EINVAL;
	}

	bt_dev_dbg(hu->hdev, "Suspended");

	hci_uart_set_flow_control(hu, true);

	return 0;
}

/* Host-initiated LPM resume: release flow control, send the wake packet and
 * wait for the controller's RESUME_ACK. No-op if LPM is disabled or the
 * device is not suspended.
 */
static int intel_lpm_resume(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
	    !test_bit(STATE_SUSPENDED, &intel->flags))
		return 0;

	bt_dev_dbg(hu->hdev, "Resuming");

	hci_uart_set_flow_control(hu, false);

	/* Zero-length skb: the wake packet consists of the type byte only. */
	skb = bt_skb_alloc(0, GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
		return -ENOMEM;
	}

	hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT;

	set_bit(STATE_LPM_TRANSACTION, &intel->flags);

	/* LPM flow is a priority, enqueue packet at list head */
	skb_queue_head(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	intel_wait_lpm_transaction(hu);
	/* Even in case of failure, continue and test the suspended flag */

	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);

	if (test_bit(STATE_SUSPENDED, &intel->flags)) {
		bt_dev_err(hu->hdev, "Device resume error");
		return -EINVAL;
	}

	bt_dev_dbg(hu->hdev, "Resumed");

	return 0;
}

/* Controller-initiated resume (host-wake IRQ fired): clear the suspended
 * state, re-enable the UART and acknowledge with a RESUME_ACK LPM packet.
 */
static int intel_lpm_host_wake(struct hci_uart *hu)
{
	static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 };
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	hci_uart_set_flow_control(hu, false);

	clear_bit(STATE_SUSPENDED, &intel->flags);

	skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
		return -ENOMEM;
	}

	skb_put_data(skb, lpm_resume_ack, sizeof(lpm_resume_ack));
	hci_skb_pkt_type(skb) = HCI_LPM_PKT;

	/* LPM flow is a priority, enqueue packet at list head */
	skb_queue_head(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	bt_dev_dbg(hu->hdev, "Resumed by controller");

	return 0;
}

/* Threaded host-wake IRQ handler: perform the LPM host-wake handshake (if a
 * hci_uart is currently bound) and rearm the runtime-PM autosuspend timer.
 */
static irqreturn_t intel_irq(int irq, void *dev_id)
{
	struct intel_device *idev = dev_id;

	dev_info(&idev->pdev->dev, "hci_intel irq\n");

	mutex_lock(&idev->hu_lock);
	if (idev->hu)
		intel_lpm_host_wake(idev->hu);
	mutex_unlock(&idev->hu_lock);

	/* Host/Controller are now LPM resumed, trigger a new delayed suspend */
	pm_runtime_get(&idev->pdev->dev);
	pm_runtime_put_autosuspend(&idev->pdev->dev);

	return IRQ_HANDLED;
}

/* Power the matching platform device on/off via its reset GPIO, bind/unbind
 * the hci_uart reference used by PM callbacks, and set up or tear down the
 * host-wake IRQ and runtime PM.
 *
 * NOTE(review): err is only set to 0 by a successful IRQ request on the
 * power-on path; on other successful paths (e.g. power-off, or power-on
 * without wakeup support) -ENODEV is returned. intel_open() relies on this:
 * STATE_BOOTING is only set when the IRQ/wakeup setup succeeded — confirm
 * against the driver's intended semantics before changing.
 */
static int intel_set_power(struct hci_uart *hu, bool powered)
{
	struct intel_device *idev;
	int err = -ENODEV;

	if (!hu->tty->dev)
		return err;

	mutex_lock(&intel_device_list_lock);

	list_for_each_entry(idev, &intel_device_list, list) {
		/* tty device and pdev device should share the same parent
		 * which is the UART port.
		 */
		if (hu->tty->dev->parent != idev->pdev->dev.parent)
			continue;

		if (!idev->reset) {
			err = -ENOTSUPP;
			break;
		}

		BT_INFO("hu %p, Switching compatible pm device (%s) to %u",
			hu, dev_name(&idev->pdev->dev), powered);

		gpiod_set_value(idev->reset, powered);

		/* Provide to idev a hu reference which is used to run LPM
		 * transactions (lpm suspend/resume) from PM callbacks.
		 * hu needs to be protected against concurrent removing during
		 * these PM ops.
		 */
		mutex_lock(&idev->hu_lock);
		idev->hu = powered ? hu : NULL;
		mutex_unlock(&idev->hu_lock);

		if (idev->irq < 0)
			break;

		if (powered && device_can_wakeup(&idev->pdev->dev)) {
			err = devm_request_threaded_irq(&idev->pdev->dev,
							idev->irq, NULL,
							intel_irq,
							IRQF_ONESHOT,
							"bt-host-wake", idev);
			if (err) {
				BT_ERR("hu %p, unable to allocate irq-%d",
				       hu, idev->irq);
				break;
			}

			device_wakeup_enable(&idev->pdev->dev);

			pm_runtime_set_active(&idev->pdev->dev);
			pm_runtime_use_autosuspend(&idev->pdev->dev);
			pm_runtime_set_autosuspend_delay(&idev->pdev->dev,
							 LPM_SUSPEND_DELAY_MS);
			pm_runtime_enable(&idev->pdev->dev);
		} else if (!powered && device_may_wakeup(&idev->pdev->dev)) {
			devm_free_irq(&idev->pdev->dev, idev->irq, idev);
			device_wakeup_disable(&idev->pdev->dev);

			pm_runtime_disable(&idev->pdev->dev);
		}
	}

	mutex_unlock(&intel_device_list_lock);

	return err;
}

/* Work item scheduled while TX is active: pokes runtime PM so autosuspend is
 * postponed until the link goes idle again.
 */
static void intel_busy_work(struct work_struct *work)
{
	struct intel_data *intel = container_of(work, struct intel_data,
						busy_work);
	struct intel_device *idev;

	if (!intel->hu->tty->dev)
		return;

	/* Link is busy, delay the suspend */
	mutex_lock(&intel_device_list_lock);
	list_for_each_entry(idev, &intel_device_list, list) {
		if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
			pm_runtime_get(&idev->pdev->dev);
			pm_runtime_put_autosuspend(&idev->pdev->dev);
			break;
		}
	}
	mutex_unlock(&intel_device_list_lock);
}

/* hci_uart_proto .open: allocate per-instance data and power on the matching
 * platform device. Requires hardware flow control on the UART.
 */
static int intel_open(struct hci_uart *hu)
{
	struct intel_data *intel;

	BT_DBG("hu %p", hu);

	if (!hci_uart_has_flow_control(hu))
		return -EOPNOTSUPP;

	intel = kzalloc(sizeof(*intel), GFP_KERNEL);
	if (!intel)
		return -ENOMEM;

	skb_queue_head_init(&intel->txq);
	INIT_WORK(&intel->busy_work, intel_busy_work);

	intel->hu = hu;

	hu->priv = intel;

	/* Device just powered on: expect the boot handshake to follow. */
	if (!intel_set_power(hu, true))
		set_bit(STATE_BOOTING, &intel->flags);

	return 0;
}

/* hci_uart_proto .close: power off, drop queued/partial packets and free the
 * per-instance data.
 */
static int intel_close(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;

	BT_DBG("hu %p", hu);

	cancel_work_sync(&intel->busy_work);

	intel_set_power(hu, false);

	skb_queue_purge(&intel->txq);
	kfree_skb(intel->rx_skb);
	kfree(intel);

	hu->priv = NULL;
	return 0;
}

/* hci_uart_proto .flush: discard all queued TX packets. */
static int intel_flush(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;

	BT_DBG("hu %p", hu);

	skb_queue_purge(&intel->txq);

	return 0;
}

/* Fabricate and deliver a Command Complete event for @opcode. Used because
 * the Intel reset command never gets a real completion from the controller
 * (see intel_dequeue()), which would otherwise stall HCI command flow control.
 */
static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	/* Trailing status byte: success. */
	skb_put_u8(skb, 0x00);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}

/* hci_uart_proto .set_baudrate: switch the controller to @speed via the
 * vendor 0xfc06 command, then retune the host UART to match.
 */
static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
	struct intel_data *intel = hu->priv;
	struct hci_dev *hdev = hu->hdev;
	u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 };
	struct sk_buff *skb;
	int err;

	/* This can be the first command sent to the chip, check
	 * that the controller is ready.
	 */
	err = intel_wait_booting(hu);

	clear_bit(STATE_BOOTING, &intel->flags);

	/* In case of timeout, try to continue anyway */
	if (err && err != -ETIMEDOUT)
		return err;

	bt_dev_info(hdev, "Change controller speed to %d", speed);

	speed_cmd[3] = intel_convert_speed(speed);
	if (speed_cmd[3] == 0xff) {
		bt_dev_err(hdev, "Unsupported speed");
		return -EINVAL;
	}

	/* Device will not accept speed change if Intel version has not been
	 * previously requested.
	 */
	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hdev, "Failed to alloc memory for baudrate packet");
		return -ENOMEM;
	}

	skb_put_data(skb, speed_cmd, sizeof(speed_cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	hci_uart_set_flow_control(hu, true);

	skb_queue_tail(&intel->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* wait 100ms to change baudrate on controller side */
	msleep(100);

	hci_uart_set_baudrate(hu, speed);
	hci_uart_set_flow_control(hu, false);

	return 0;
}

/* hci_uart_proto .setup: full device bring-up — detect bootloader vs
 * operational firmware, download and boot the .sfi firmware image, switch
 * to operational speed, enable LPM when wakeup is supported, and load DDC
 * parameters.
 */
static int intel_setup(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	struct hci_dev *hdev = hu->hdev;
	struct sk_buff *skb;
	struct intel_version ver;
	struct intel_boot_params params;
	struct intel_device *idev;
	const struct firmware *fw;
	char fwname[64];
	u32 boot_param;
	ktime_t calltime, delta, rettime;
	unsigned long long duration;
	unsigned int init_speed, oper_speed;
	int speed_change = 0;
	int err;

	bt_dev_dbg(hdev, "");

	hu->hdev->set_diag = btintel_set_diag;
	hu->hdev->set_bdaddr = btintel_set_bdaddr;

	/* Set the default boot parameter to 0x0 and it is updated to
	 * SKU specific boot parameter after reading Intel_Write_Boot_Params
	 * command while downloading the firmware.
	 */
	boot_param = 0x00000000;

	calltime = ktime_get();

	if (hu->init_speed)
		init_speed = hu->init_speed;
	else
		init_speed = hu->proto->init_speed;

	if (hu->oper_speed)
		oper_speed = hu->oper_speed;
	else
		oper_speed = hu->proto->oper_speed;

	if (oper_speed && init_speed && oper_speed != init_speed)
		speed_change = 1;

	/* Check that the controller is ready */
	err = intel_wait_booting(hu);

	clear_bit(STATE_BOOTING, &intel->flags);

	/* In case of timeout, try to continue anyway */
	if (err && err != -ETIMEDOUT)
		return err;

	set_bit(STATE_BOOTLOADER, &intel->flags);

	/* Read the Intel version information to determine if the device
	 * is in bootloader mode or if it already has operational firmware
	 * loaded.
	 */
	err = btintel_read_version(hdev, &ver);
	if (err)
		return err;

	/* The hardware platform number has a fixed value of 0x37 and
	 * for now only accept this single value.
	 */
	if (ver.hw_platform != 0x37) {
		bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
			   ver.hw_platform);
		return -EINVAL;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come along.
	 */
	switch (ver.hw_variant) {
	case 0x0b:	/* LnP */
	case 0x0c:	/* WsP */
	case 0x12:	/* ThP */
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
			   ver.hw_variant);
		return -EINVAL;
	}

	btintel_version_info(hdev, &ver);

	/* The firmware variant determines if the device is in bootloader
	 * mode or is running operational firmware. The value 0x06 identifies
	 * the bootloader and the value 0x23 identifies the operational
	 * firmware.
	 *
	 * When the operational firmware is already present, then only
	 * the check for valid Bluetooth device address is needed. This
	 * determines if the device will be added as configured or
	 * unconfigured controller.
	 *
	 * It is not possible to use the Secure Boot Parameters in this
	 * case since that command is only available in bootloader mode.
	 */
	if (ver.fw_variant == 0x23) {
		clear_bit(STATE_BOOTLOADER, &intel->flags);
		btintel_check_bdaddr(hdev);
		return 0;
	}

	/* If the device is not in bootloader mode, then the only possible
	 * choice is to return an error and abort the device initialization.
	 */
	if (ver.fw_variant != 0x06) {
		bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
			   ver.fw_variant);
		return -ENODEV;
	}

	/* Read the secure boot parameters to identify the operating
	 * details of the bootloader.
	 */
	err = btintel_read_boot_params(hdev, &params);
	if (err)
		return err;

	/* It is required that every single firmware fragment is acknowledged
	 * with a command complete event. If the boot parameters indicate
	 * that this bootloader does not send them, then abort the setup.
	 */
	if (params.limited_cce != 0x00) {
		bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
			   params.limited_cce);
		return -EINVAL;
	}

	/* If the OTP has no valid Bluetooth device address, then there will
	 * also be no valid address for the operational firmware.
	 */
	if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
		bt_dev_info(hdev, "No device address configured");
		hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
	}

	/* With this Intel bootloader only the hardware variant and device
	 * revision information are used to select the right firmware for SfP
	 * and WsP.
	 *
	 * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
	 *
	 * Currently the supported hardware variants are:
	 *   11 (0x0b) for iBT 3.0 (LnP/SfP)
	 *   12 (0x0c) for iBT 3.5 (WsP)
	 *
	 * For ThP/JfP and for future SKU's, the FW name varies based on HW
	 * variant, HW revision and FW revision, as these are dependent on CNVi
	 * and RF Combination.
	 *
	 *   18 (0x12) for iBT3.5 (ThP/JfP)
	 *
	 * The firmware file name for these will be
	 * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
	 *
	 */
	switch (ver.hw_variant) {
	case 0x0b:	/* SfP */
	case 0x0c:	/* WsP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
			 ver.hw_variant, le16_to_cpu(params.dev_revid));
		break;
	case 0x12:	/* ThP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
			 ver.hw_variant, ver.hw_revision, ver.fw_revision);
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
			   ver.hw_variant);
		return -EINVAL;
	}

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
			   err);
		return err;
	}

	bt_dev_info(hdev, "Found device firmware: %s", fwname);

	/* Save the DDC file name for later */
	switch (ver.hw_variant) {
	case 0x0b:	/* SfP */
	case 0x0c:	/* WsP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
			 ver.hw_variant, le16_to_cpu(params.dev_revid));
		break;
	case 0x12:	/* ThP */
		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
			 ver.hw_variant, ver.hw_revision, ver.fw_revision);
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
			   ver.hw_variant);
		return -EINVAL;
	}

	/* Minimum sanity check on the image size (CSS header + payload). */
	if (fw->size < 644) {
		bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
			   fw->size);
		err = -EBADF;
		goto done;
	}

	set_bit(STATE_DOWNLOADING, &intel->flags);

	/* Start firmware downloading and get boot parameter */
	err = btintel_download_firmware(hdev, &ver, fw, &boot_param);
	if (err < 0)
		goto done;

	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);

	bt_dev_info(hdev, "Waiting for firmware download to complete");

	/* Before switching the device into operational mode and with that
	 * booting the loaded firmware, wait for the bootloader notification
	 * that all fragments have been successfully received.
	 *
	 * When the event processing receives the notification, then the
	 * STATE_DOWNLOADING flag will be cleared.
	 *
	 * The firmware loading should not take longer than 5 seconds
	 * and thus just timeout if that happens and fail the setup
	 * of this device.
	 */
	err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
				  TASK_INTERRUPTIBLE,
				  msecs_to_jiffies(5000));
	if (err == -EINTR) {
		bt_dev_err(hdev, "Firmware loading interrupted");
		err = -EINTR;
		goto done;
	}

	if (err) {
		bt_dev_err(hdev, "Firmware loading timeout");
		err = -ETIMEDOUT;
		goto done;
	}

	if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
		bt_dev_err(hdev, "Firmware loading failed");
		err = -ENOEXEC;
		goto done;
	}

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);

done:
	release_firmware(fw);

	/* Check if there was an error and if is not -EALREADY which means the
	 * firmware has already been loaded.
	 */
	if (err < 0 && err != -EALREADY)
		return err;

	/* We need to restore the default speed before Intel reset */
	if (speed_change) {
		err = intel_set_baudrate(hu, init_speed);
		if (err)
			return err;
	}

	calltime = ktime_get();

	set_bit(STATE_BOOTING, &intel->flags);

	err = btintel_send_intel_reset(hdev, boot_param);
	if (err)
		return err;

	/* The bootloader will not indicate when the device is ready. This
	 * is done by the operational firmware sending bootup notification.
	 *
	 * Booting into operational firmware should not take longer than
	 * 1 second. However if that happens, then just fail the setup
	 * since something went wrong.
	 */
	bt_dev_info(hdev, "Waiting for device to boot");

	err = intel_wait_booting(hu);
	if (err)
		return err;

	clear_bit(STATE_BOOTING, &intel->flags);

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	bt_dev_info(hdev, "Device booted in %llu usecs", duration);

	/* Enable LPM if matching pdev with wakeup enabled, set TX active
	 * until further LPM TX notification.
	 */
	mutex_lock(&intel_device_list_lock);
	list_for_each_entry(idev, &intel_device_list, list) {
		if (!hu->tty->dev)
			break;
		if (hu->tty->dev->parent == idev->pdev->dev.parent) {
			if (device_may_wakeup(&idev->pdev->dev)) {
				set_bit(STATE_LPM_ENABLED, &intel->flags);
				set_bit(STATE_TX_ACTIVE, &intel->flags);
			}
			break;
		}
	}
	mutex_unlock(&intel_device_list_lock);

	/* Ignore errors, device can work without DDC parameters */
	btintel_load_ddc_config(hdev, fwname);

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	kfree_skb(skb);

	if (speed_change) {
		err = intel_set_baudrate(hu, oper_speed);
		if (err)
			return err;
	}

	bt_dev_info(hdev, "Setup complete");

	clear_bit(STATE_BOOTLOADER, &intel->flags);

	return 0;
}

/* HCI event receive hook: intercept the vendor events that signal firmware
 * download completion and operational bootup while in bootloader/boot state,
 * then pass every event on to the core.
 */
static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct intel_data *intel = hu->priv;
	struct hci_event_hdr *hdr;

	if (!test_bit(STATE_BOOTLOADER, &intel->flags) &&
	    !test_bit(STATE_BOOTING, &intel->flags))
		goto recv;

	hdr = (void *)skb->data;

	/* When the firmware loading completes the device sends
	 * out a vendor specific event indicating the result of
	 * the firmware loading.
	 */
	if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
	    skb->data[2] == 0x06) {
		if (skb->data[3] != 0x00)
			set_bit(STATE_FIRMWARE_FAILED, &intel->flags);

		if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
		    test_bit(STATE_FIRMWARE_LOADED, &intel->flags))
			wake_up_bit(&intel->flags, STATE_DOWNLOADING);

	/* When switching to the operational firmware the device
	 * sends a vendor specific event indicating that the bootup
	 * completed.
	 */
	} else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
		   skb->data[2] == 0x02) {
		if (test_and_clear_bit(STATE_BOOTING, &intel->flags))
			wake_up_bit(&intel->flags, STATE_BOOTING);
	}
recv:
	return hci_recv_frame(hdev, skb);
}

/* Handle an LPM TX-notify payload: value != 0 means the controller's TX is
 * active (block suspend, rearm autosuspend via busy_work), 0 means idle.
 */
static void intel_recv_lpm_notify(struct hci_dev *hdev, int value)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct intel_data *intel = hu->priv;

	bt_dev_dbg(hdev, "TX idle notification (%d)", value);

	if (value) {
		set_bit(STATE_TX_ACTIVE, &intel->flags);
		schedule_work(&intel->busy_work);
	} else {
		clear_bit(STATE_TX_ACTIVE, &intel->flags);
	}
}

/* Receive hook for HCI_LPM_PKT frames: dispatch on the LPM opcode and wake
 * any waiter blocked in intel_wait_lpm_transaction(). Consumes the skb.
 */
static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_lpm_pkt *lpm = (void *)skb->data;
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct intel_data *intel = hu->priv;

	switch (lpm->opcode) {
	case LPM_OP_TX_NOTIFY:
		if (lpm->dlen < 1) {
			bt_dev_err(hu->hdev, "Invalid LPM notification packet");
			break;
		}
		intel_recv_lpm_notify(hdev, lpm->data[0]);
		break;
	case LPM_OP_SUSPEND_ACK:
		set_bit(STATE_SUSPENDED, &intel->flags);
		if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags))
			wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
		break;
	case LPM_OP_RESUME_ACK:
		clear_bit(STATE_SUSPENDED, &intel->flags);
		if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags))
			wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
		break;
	default:
		bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode);
		break;
	}

	kfree_skb(skb);

	return 0;
}

/* H:4 reassembly descriptor for LPM packets: length field at offset 1,
 * 1 byte wide, max packet size HCI_LPM_MAX_SIZE.
 */
#define INTEL_RECV_LPM \
	.type = HCI_LPM_PKT, \
	.hlen = HCI_LPM_HDR_SIZE, \
	.loff = 1, \
	.lsize = 1, \
	.maxlen = HCI_LPM_MAX_SIZE

static const struct h4_recv_pkt intel_recv_pkts[] = {
	{ H4_RECV_ACL,    .recv = hci_recv_frame   },
	{ H4_RECV_SCO,    .recv = hci_recv_frame   },
	{ H4_RECV_EVENT,  .recv = intel_recv_event },
	{ INTEL_RECV_LPM, .recv = intel_recv_lpm   },
};

/* hci_uart_proto .recv: feed raw UART bytes into the H:4 reassembler. */
static int intel_recv(struct hci_uart *hu, const void *data, int count)
{
	struct intel_data *intel = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
				    intel_recv_pkts,
				    ARRAY_SIZE(intel_recv_pkts));
	if (IS_ERR(intel->rx_skb)) {
		int err = PTR_ERR(intel->rx_skb);

		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		intel->rx_skb = NULL;
		return err;
	}

	return count;
}

/* hci_uart_proto .enqueue: make sure the device is runtime-resumed (which
 * completes any pending LPM transaction) before queueing the packet.
 */
static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct intel_data *intel = hu->priv;
	struct intel_device *idev;

	BT_DBG("hu %p skb %p", hu, skb);

	if (!hu->tty->dev)
		goto out_enqueue;

	/* Be sure our controller is resumed and potential LPM transaction
	 * completed before enqueuing any packet.
	 */
	mutex_lock(&intel_device_list_lock);
	list_for_each_entry(idev, &intel_device_list, list) {
		if (hu->tty->dev->parent == idev->pdev->dev.parent) {
			pm_runtime_get_sync(&idev->pdev->dev);
			pm_runtime_put_autosuspend(&idev->pdev->dev);
			break;
		}
	}
	mutex_unlock(&intel_device_list_lock);
out_enqueue:
	skb_queue_tail(&intel->txq, skb);

	return 0;
}

/* hci_uart_proto .dequeue: pop the next TX packet, inject a fake command
 * complete for the Intel reset command (which never gets a real one), and
 * prepend the H:4 packet-type byte.
 */
static struct sk_buff *intel_dequeue(struct hci_uart *hu)
{
	struct intel_data *intel = hu->priv;
	struct sk_buff *skb;

	skb = skb_dequeue(&intel->txq);
	if (!skb)
		return skb;

	if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
	    (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) {
		struct hci_command_hdr *cmd = (void *)skb->data;
		__u16 opcode = le16_to_cpu(cmd->opcode);

		/* When the BTINTEL_HCI_OP_RESET command is issued to boot into
		 * the operational firmware, it will actually not send a command
		 * complete event. To keep the flow control working inject that
		 * event here.
		 */
		if (opcode == BTINTEL_HCI_OP_RESET)
			inject_cmd_complete(hu->hdev, opcode);
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	return skb;
}

static const struct hci_uart_proto intel_proto = {
	.id		= HCI_UART_INTEL,
	.name		= "Intel",
	.manufacturer	= 2,
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= intel_open,
	.close		= intel_close,
	.flush		= intel_flush,
	.setup		= intel_setup,
	.set_baudrate	= intel_set_baudrate,
	.recv		= intel_recv,
	.enqueue	= intel_enqueue,
	.dequeue	= intel_dequeue,
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id intel_acpi_match[] = {
	{ "INT33E1", 0 },
	{ "INT33E3", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
#endif

/* Runtime-PM suspend callback: run the LPM suspend handshake if a hci_uart
 * is currently bound to this platform device.
 */
static int intel_suspend_device(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	mutex_lock(&idev->hu_lock);
	if (idev->hu)
		intel_lpm_suspend(idev->hu);
	mutex_unlock(&idev->hu_lock);

	return 0;
}

/* Runtime-PM resume callback: run the LPM resume handshake if bound. */
static int intel_resume_device(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	mutex_lock(&idev->hu_lock);
	if (idev->hu)
		intel_lpm_resume(idev->hu);
	mutex_unlock(&idev->hu_lock);

	return 0;
}

/* System sleep suspend: arm the host-wake IRQ as a wakeup source, then
 * perform the same LPM suspend as runtime PM.
 */
static int __maybe_unused intel_suspend(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(idev->irq);

	return intel_suspend_device(dev);
}

/* System sleep resume: disarm IRQ wake and perform the LPM resume. */
static int __maybe_unused intel_resume(struct device *dev)
{
	struct intel_device *idev = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(idev->irq);

	return intel_resume_device(dev);
}

static const struct dev_pm_ops intel_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
	SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL)
};

/* ACPI GpioIo/GpioInt resource mapping: index 0 = reset, index 1 = host-wake. */
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false };

static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = {
	{ "reset-gpios", &reset_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
	{ "host-wake-gpios", &host_wake_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
	{ }
};

/* Platform driver probe: acquire reset GPIO and host-wake IRQ (falling back
 * to a GPIO-backed IRQ), mark the device wakeup-capable, and register it on
 * the global device list for later matching against a hci_uart.
 */
static int intel_probe(struct platform_device *pdev)
{
	struct intel_device *idev;
	int ret;

	idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	mutex_init(&idev->hu_lock);

	idev->pdev = pdev;

	ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, acpi_hci_intel_gpios);
	if (ret)
		dev_dbg(&pdev->dev, "Unable to add GPIO mapping table\n");

	idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(idev->reset)) {
		dev_err(&pdev->dev, "Unable to retrieve gpio\n");
		return PTR_ERR(idev->reset);
	}

	idev->irq = platform_get_irq(pdev, 0);
	if (idev->irq < 0) {
		struct gpio_desc *host_wake;

		dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");

		host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
		if (IS_ERR(host_wake)) {
			dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
			goto no_irq;
		}

		idev->irq = gpiod_to_irq(host_wake);
		if (idev->irq < 0) {
			dev_err(&pdev->dev, "No corresponding irq for gpio\n");
			goto no_irq;
		}
	}

	/* Only enable wake-up/irq when controller is powered */
	device_set_wakeup_capable(&pdev->dev, true);
	device_wakeup_disable(&pdev->dev);

no_irq:
	platform_set_drvdata(pdev, idev);

	/* Place this instance on the device list */
	mutex_lock(&intel_device_list_lock);
	list_add_tail(&idev->list, &intel_device_list);
	mutex_unlock(&intel_device_list_lock);

	dev_info(&pdev->dev, "registered, gpio(%d)/irq(%d).\n",
		 desc_to_gpio(idev->reset), idev->irq);

	return 0;
}

/* Platform driver remove: disable wakeup and unlink from the device list.
 * (Resources are devm-managed and released automatically.)
 */
static void intel_remove(struct platform_device *pdev)
{
	struct intel_device *idev = platform_get_drvdata(pdev);

	device_wakeup_disable(&pdev->dev);

	mutex_lock(&intel_device_list_lock);
	list_del(&idev->list);
	mutex_unlock(&intel_device_list_lock);

	dev_info(&pdev->dev, "unregistered.\n");
}

static struct platform_driver intel_driver = {
	.probe = intel_probe,
	.remove = intel_remove,
	.driver = {
		.name = "hci_intel",
		.acpi_match_table = ACPI_PTR(intel_acpi_match),
		.pm = &intel_pm_ops,
	},
};

/* Module-level init: register the platform driver, then the UART protocol.
 * Called from the hci_uart core (declared in hci_uart.h).
 */
int __init intel_init(void)
{
	int err;

	err = platform_driver_register(&intel_driver);
	if (err)
		return err;

	return hci_uart_register_proto(&intel_proto);
}

/* Module-level teardown: mirror of intel_init(). */
int __exit intel_deinit(void)
{
	platform_driver_unregister(&intel_driver);

	return hci_uart_unregister_proto(&intel_proto);
}