Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-net-next-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Luiz Augusto von Dentz says:

====================
bluetooth-next pull request for net-next:

- qca: use the power sequencer for QCA6390
- btusb: mediatek: add ISO data transmission functions
- hci_bcm4377: Add BCM4388 support
- btintel: Add support for BlazarU core
- btintel: Add support for Whale Peak2
- btnxpuart: Add support for AW693 A1 chipset
- btnxpuart: Add support for IW615 chipset
- btusb: Add Realtek RTL8852BE support ID 0x13d3:0x3591

* tag 'for-net-next-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next: (71 commits)
Bluetooth: btmtk: Mark all stub functions as inline
Bluetooth: hci_qca: Fix build error
Bluetooth: hci_qca: use the power sequencer for wcn7850 and wcn6855
Bluetooth: hci_qca: make pwrseq calls the default if available
Bluetooth: hci_qca: unduplicate calls to hci_uart_register_device()
Bluetooth: hci_qca: schedule a devm action for disabling the clock
dt-bindings: bluetooth: qualcomm: describe the inputs from PMU for wcn7850
Bluetooth: btnxpuart: Fix warnings for suspend and resume functions
Bluetooth: btnxpuart: Add system suspend and resume handlers
Bluetooth: btnxpuart: Add support for IW615 chipset
Bluetooth: btnxpuart: Add support for AW693 A1 chipset
Bluetooth: btintel: Add support for Whale Peak2
Bluetooth: btintel: Add support for BlazarU core
Bluetooth: btusb: mediatek: add ISO data transmission functions
Bluetooth: btmtk: move btusb_recv_acl_mtk to btmtk.c
Bluetooth: btmtk: move btusb_mtk_[setup, shutdown] to btmtk.c
Bluetooth: btmtk: move btusb_mtk_hci_wmt_sync to btmtk.c
Bluetooth: btusb: add callback function in btusb suspend/resume
Bluetooth: btmtk: rename btmediatek_data
Bluetooth: btusb: mediatek: return error for failed reg access
...
====================

Link: https://patch.msgid.link/20240715142543.303944-1-luiz.dentz@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+3697 -2018
+51
Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/net/bluetooth/mediatek,mt7622-bluetooth.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: MediaTek SoC built-in Bluetooth 8 + 9 + description: 10 + This device is a serial attached device to BTIF device and thus it must be a 11 + child node of the serial node with BTIF. The dt-bindings details for BTIF 12 + device can be known via Documentation/devicetree/bindings/serial/8250.yaml. 13 + 14 + maintainers: 15 + - Sean Wang <sean.wang@mediatek.com> 16 + 17 + allOf: 18 + - $ref: bluetooth-controller.yaml# 19 + 20 + properties: 21 + compatible: 22 + const: mediatek,mt7622-bluetooth 23 + 24 + clocks: 25 + maxItems: 1 26 + 27 + clock-names: 28 + const: ref 29 + 30 + power-domains: 31 + maxItems: 1 32 + 33 + required: 34 + - clocks 35 + - clock-names 36 + - power-domains 37 + 38 + unevaluatedProperties: false 39 + 40 + examples: 41 + - | 42 + #include <dt-bindings/power/mt7622-power.h> 43 + 44 + serial { 45 + bluetooth { 46 + compatible = "mediatek,mt7622-bluetooth"; 47 + power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>; 48 + clocks = <&clk25m>; 49 + clock-names = "ref"; 50 + }; 51 + };
+4
Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
··· 31 31 This property depends on the module vendor's 32 32 configuration. 33 33 34 + firmware-name: 35 + maxItems: 1 36 + 34 37 required: 35 38 - compatible 36 39 ··· 45 42 bluetooth { 46 43 compatible = "nxp,88w8987-bt"; 47 44 fw-init-baudrate = <3000000>; 45 + firmware-name = "uartuart8987_bt_v0.bin"; 48 46 }; 49 47 };
+30 -5
Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
··· 62 62 vdddig-supply: 63 63 description: VDD_DIG supply regulator handle 64 64 65 + vddbtcmx-supply: 66 + description: VDD_BT_CMX supply regulator handle 67 + 65 68 vddbtcxmx-supply: 66 69 description: VDD_BT_CXMX supply regulator handle 67 70 ··· 77 74 vddrfa1p7-supply: 78 75 description: VDD_RFA_1P7 supply regulator handle 79 76 77 + vddrfa1p8-supply: 78 + description: VDD_RFA_1P8 supply regulator handle 79 + 80 80 vddrfa1p2-supply: 81 81 description: VDD_RFA_1P2 supply regulator handle 82 82 ··· 91 85 92 86 vddasd-supply: 93 87 description: VDD_ASD supply regulator handle 88 + 89 + vddwlcx-supply: 90 + description: VDD_WLCX supply regulator handle 91 + 92 + vddwlmx-supply: 93 + description: VDD_WLMX supply regulator handle 94 94 95 95 max-speed: 96 96 description: see Documentation/devicetree/bindings/serial/serial.yaml ··· 188 176 - qcom,wcn7850-bt 189 177 then: 190 178 required: 191 - - enable-gpios 192 - - swctrl-gpios 193 - - vddio-supply 179 + - vddrfacmn-supply 194 180 - vddaon-supply 195 - - vdddig-supply 181 + - vddwlcx-supply 182 + - vddwlmx-supply 196 183 - vddrfa0p8-supply 197 184 - vddrfa1p2-supply 198 - - vddrfa1p9-supply 185 + - vddrfa1p8-supply 186 + - if: 187 + properties: 188 + compatible: 189 + contains: 190 + enum: 191 + - qcom,qca6390-bt 192 + then: 193 + required: 194 + - vddrfacmn-supply 195 + - vddaon-supply 196 + - vddbtcmx-supply 197 + - vddrfa0p8-supply 198 + - vddrfa1p2-supply 199 + - vddrfa1p7-supply 199 200 200 201 examples: 201 202 - |
-36
Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
··· 1 - MediaTek SoC built-in Bluetooth Devices 2 - ================================== 3 - 4 - This device is a serial attached device to BTIF device and thus it must be a 5 - child node of the serial node with BTIF. The dt-bindings details for BTIF 6 - device can be known via Documentation/devicetree/bindings/serial/8250.yaml. 7 - 8 - Required properties: 9 - 10 - - compatible: Must be 11 - "mediatek,mt7622-bluetooth": for MT7622 SoC 12 - - clocks: Should be the clock specifiers corresponding to the entry in 13 - clock-names property. 14 - - clock-names: Should contain "ref" entries. 15 - - power-domains: Phandle to the power domain that the device is part of 16 - 17 - Example: 18 - 19 - btif: serial@1100c000 { 20 - compatible = "mediatek,mt7622-btif", 21 - "mediatek,mtk-btif"; 22 - reg = <0 0x1100c000 0 0x1000>; 23 - interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>; 24 - clocks = <&pericfg CLK_PERI_BTIF_PD>; 25 - clock-names = "main"; 26 - reg-shift = <2>; 27 - reg-io-width = <4>; 28 - 29 - bluetooth { 30 - compatible = "mediatek,mt7622-bluetooth"; 31 - power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>; 32 - clocks = <&clk25m>; 33 - clock-names = "ref"; 34 - }; 35 - }; 36 - 37 1 MediaTek UART based Bluetooth Devices 38 2 ================================== 39 3
+8
MAINTAINERS
··· 17908 17908 F: include/linux/powercap.h 17909 17909 F: kernel/configs/nopm.config 17910 17910 17911 + POWER SEQUENCING 17912 + M: Bartosz Golaszewski <brgl@bgdev.pl> 17913 + L: linux-pm@vger.kernel.org 17914 + S: Maintained 17915 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git 17916 + F: drivers/power/sequencing/ 17917 + F: include/linux/pwrseq/ 17918 + 17911 17919 POWER STATE COORDINATION INTERFACE (PSCI) 17912 17920 M: Mark Rutland <mark.rutland@arm.com> 17913 17921 M: Lorenzo Pieralisi <lpieralisi@kernel.org>
+4 -3
drivers/bluetooth/Kconfig
··· 105 105 tristate "HCI UART driver" 106 106 depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS 107 107 depends on NVMEM || !NVMEM 108 + depends on POWER_SEQUENCING || !POWER_SEQUENCING 108 109 depends on TTY 109 110 help 110 111 Bluetooth HCI UART driver. ··· 288 287 289 288 290 289 config BT_HCIBCM4377 291 - tristate "HCI BCM4377/4378/4387 PCIe driver" 290 + tristate "HCI BCM4377/4378/4387/4388 PCIe driver" 292 291 depends on PCI 293 292 select FW_LOADER 294 293 help 295 - Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via 296 - PCIe. These are usually found in Apple machines. 294 + Support for Broadcom BCM4377/4378/4387/4388 Bluetooth chipsets 295 + attached via PCIe. These are usually found in Apple machines. 297 296 298 297 Say Y here to compile support for HCI BCM4377 family devices into the 299 298 kernel or say M to compile it as module (hci_bcm4377).
+133 -111
drivers/bluetooth/btintel.c
··· 26 26 #define ECDSA_OFFSET 644 27 27 #define ECDSA_HEADER_LEN 320 28 28 29 - #define BTINTEL_PPAG_NAME "PPAG" 30 - 31 29 enum { 32 30 DSM_SET_WDISABLE2_DELAY = 1, 33 31 DSM_SET_RESET_METHOD = 3, 34 - }; 35 - 36 - /* structure to store the PPAG data read from ACPI table */ 37 - struct btintel_ppag { 38 - u32 domain; 39 - u32 mode; 40 - acpi_status status; 41 - struct hci_dev *hdev; 42 32 }; 43 33 44 34 #define CMD_WRITE_BOOT_PARAMS 0xfc0e ··· 472 482 case 0x19: /* Slr-F */ 473 483 case 0x1b: /* Mgr */ 474 484 case 0x1c: /* Gale Peak (GaP) */ 485 + case 0x1d: /* BlazarU (BzrU) */ 475 486 case 0x1e: /* BlazarI (Bzr) */ 476 487 break; 477 488 default: ··· 631 640 break; 632 641 case INTEL_TLV_GIT_SHA1: 633 642 version->git_sha1 = get_unaligned_le32(tlv->val); 643 + break; 644 + case INTEL_TLV_FW_ID: 645 + snprintf(version->fw_id, sizeof(version->fw_id), 646 + "%s", tlv->val); 634 647 break; 635 648 default: 636 649 /* Ignore rest of information */ ··· 1317 1322 */ 1318 1323 kfree_skb(skb); 1319 1324 return 0; 1320 - } 1321 - 1322 - static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data, 1323 - void **ret) 1324 - { 1325 - acpi_status status; 1326 - size_t len; 1327 - struct btintel_ppag *ppag = data; 1328 - union acpi_object *p, *elements; 1329 - struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL}; 1330 - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 1331 - struct hci_dev *hdev = ppag->hdev; 1332 - 1333 - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 1334 - if (ACPI_FAILURE(status)) { 1335 - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); 1336 - return status; 1337 - } 1338 - 1339 - len = strlen(string.pointer); 1340 - if (len < strlen(BTINTEL_PPAG_NAME)) { 1341 - kfree(string.pointer); 1342 - return AE_OK; 1343 - } 1344 - 1345 - if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) { 1346 - kfree(string.pointer); 1347 - return AE_OK; 1348 - } 1349 - 
kfree(string.pointer); 1350 - 1351 - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 1352 - if (ACPI_FAILURE(status)) { 1353 - ppag->status = status; 1354 - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); 1355 - return status; 1356 - } 1357 - 1358 - p = buffer.pointer; 1359 - ppag = (struct btintel_ppag *)data; 1360 - 1361 - if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { 1362 - kfree(buffer.pointer); 1363 - bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", 1364 - p->type, p->package.count); 1365 - ppag->status = AE_ERROR; 1366 - return AE_ERROR; 1367 - } 1368 - 1369 - elements = p->package.elements; 1370 - 1371 - /* PPAG table is located at element[1] */ 1372 - p = &elements[1]; 1373 - 1374 - ppag->domain = (u32)p->package.elements[0].integer.value; 1375 - ppag->mode = (u32)p->package.elements[1].integer.value; 1376 - ppag->status = AE_OK; 1377 - kfree(buffer.pointer); 1378 - return AE_CTRL_TERMINATE; 1379 1325 } 1380 1326 1381 1327 static int btintel_set_debug_features(struct hci_dev *hdev, ··· 2138 2202 const char *suffix) 2139 2203 { 2140 2204 const char *format; 2141 - /* The firmware file name for new generation controllers will be 2142 - * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step> 2143 - */ 2144 - switch (ver->cnvi_top & 0xfff) { 2205 + u32 cnvi, cnvr; 2206 + 2207 + cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), 2208 + INTEL_CNVX_TOP_STEP(ver->cnvi_top)); 2209 + 2210 + cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), 2211 + INTEL_CNVX_TOP_STEP(ver->cnvr_top)); 2212 + 2145 2213 /* Only Blazar product supports downloading of intermediate loader 2146 2214 * image 2147 2215 */ 2148 - case BTINTEL_CNVI_BLAZARI: 2149 - if (ver->img_type == BTINTEL_IMG_BOOTLOADER) 2150 - format = "intel/ibt-%04x-%04x-iml.%s"; 2151 - else 2152 - format = "intel/ibt-%04x-%04x.%s"; 2153 - break; 2154 - default: 2155 - format = 
"intel/ibt-%04x-%04x.%s"; 2156 - break; 2157 - } 2216 + if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e) { 2217 + u8 zero[BTINTEL_FWID_MAXLEN]; 2158 2218 2159 - snprintf(fw_name, len, format, 2160 - INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), 2161 - INTEL_CNVX_TOP_STEP(ver->cnvi_top)), 2162 - INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), 2163 - INTEL_CNVX_TOP_STEP(ver->cnvr_top)), 2164 - suffix); 2219 + if (ver->img_type == BTINTEL_IMG_BOOTLOADER) { 2220 + format = "intel/ibt-%04x-%04x-iml.%s"; 2221 + snprintf(fw_name, len, format, cnvi, cnvr, suffix); 2222 + return; 2223 + } 2224 + 2225 + memset(zero, 0, sizeof(zero)); 2226 + 2227 + /* ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step-fw_id> */ 2228 + if (memcmp(ver->fw_id, zero, sizeof(zero))) { 2229 + format = "intel/ibt-%04x-%04x-%s.%s"; 2230 + snprintf(fw_name, len, format, cnvi, cnvr, 2231 + ver->fw_id, suffix); 2232 + return; 2233 + } 2234 + /* If firmware id is not present, fallback to legacy naming 2235 + * convention 2236 + */ 2237 + } 2238 + /* Fallback to legacy naming convention for other controllers 2239 + * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step> 2240 + */ 2241 + format = "intel/ibt-%04x-%04x.%s"; 2242 + snprintf(fw_name, len, format, cnvi, cnvr, suffix); 2243 + } 2244 + 2245 + static void btintel_get_iml_tlv(const struct intel_version_tlv *ver, 2246 + char *fw_name, size_t len, 2247 + const char *suffix) 2248 + { 2249 + const char *format; 2250 + u32 cnvi, cnvr; 2251 + 2252 + cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), 2253 + INTEL_CNVX_TOP_STEP(ver->cnvi_top)); 2254 + 2255 + cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), 2256 + INTEL_CNVX_TOP_STEP(ver->cnvr_top)); 2257 + 2258 + format = "intel/ibt-%04x-%04x-iml.%s"; 2259 + snprintf(fw_name, len, format, cnvi, cnvr, suffix); 2165 2260 } 2166 2261 2167 2262 static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, ··· 2200 2233 u32 
*boot_param) 2201 2234 { 2202 2235 const struct firmware *fw; 2203 - char fwname[64]; 2236 + char fwname[128]; 2204 2237 int err; 2205 2238 ktime_t calltime; 2206 2239 ··· 2235 2268 } 2236 2269 } 2237 2270 2238 - btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); 2271 + if (ver->img_type == BTINTEL_IMG_OP) { 2272 + /* Controller running OP image. In case of FW downgrade, 2273 + * FWID TLV may not be present and driver may attempt to load 2274 + * firmware image which doesn't exist. Lets compare the version 2275 + * of IML image 2276 + */ 2277 + if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e) 2278 + btintel_get_iml_tlv(ver, fwname, sizeof(fwname), "sfi"); 2279 + else 2280 + btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); 2281 + } else { 2282 + btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); 2283 + } 2284 + 2239 2285 err = firmware_request_nowarn(&fw, fwname, &hdev->dev); 2240 2286 if (err < 0) { 2241 2287 if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { ··· 2407 2427 2408 2428 static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver) 2409 2429 { 2410 - struct btintel_ppag ppag; 2411 2430 struct sk_buff *skb; 2412 2431 struct hci_ppag_enable_cmd ppag_cmd; 2413 2432 acpi_handle handle; 2433 + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 2434 + union acpi_object *p, *elements; 2435 + u32 domain, mode; 2436 + acpi_status status; 2414 2437 2415 2438 /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ 2416 2439 switch (ver->cnvr_top & 0xFFF) { ··· 2431 2448 return; 2432 2449 } 2433 2450 2434 - memset(&ppag, 0, sizeof(ppag)); 2435 - 2436 - ppag.hdev = hdev; 2437 - ppag.status = AE_NOT_FOUND; 2438 - acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL, 2439 - btintel_ppag_callback, &ppag, NULL); 2440 - 2441 - if (ACPI_FAILURE(ppag.status)) { 2442 - if (ppag.status == AE_NOT_FOUND) { 2451 + status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer); 2452 + if (ACPI_FAILURE(status)) { 2453 + if 
(status == AE_NOT_FOUND) { 2443 2454 bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found"); 2444 2455 return; 2445 2456 } 2457 + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); 2446 2458 return; 2447 2459 } 2448 2460 2449 - if (ppag.domain != 0x12) { 2461 + p = buffer.pointer; 2462 + if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { 2463 + bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", 2464 + p->type, p->package.count); 2465 + kfree(buffer.pointer); 2466 + return; 2467 + } 2468 + 2469 + elements = p->package.elements; 2470 + 2471 + /* PPAG table is located at element[1] */ 2472 + p = &elements[1]; 2473 + 2474 + domain = (u32)p->package.elements[0].integer.value; 2475 + mode = (u32)p->package.elements[1].integer.value; 2476 + kfree(buffer.pointer); 2477 + 2478 + if (domain != 0x12) { 2450 2479 bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware"); 2451 2480 return; 2452 2481 } ··· 2469 2474 * BIT 1 : 0 Disabled in China 2470 2475 * 1 Enabled in China 2471 2476 */ 2472 - if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) { 2473 - bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS"); 2477 + mode &= 0x03; 2478 + 2479 + if (!mode) { 2480 + bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in BIOS"); 2474 2481 return; 2475 2482 } 2476 2483 2477 - ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode); 2484 + ppag_cmd.ppag_enable_flags = cpu_to_le32(mode); 2478 2485 2479 - skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); 2486 + skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), 2487 + &ppag_cmd, HCI_CMD_TIMEOUT); 2480 2488 if (IS_ERR(skb)) { 2481 2489 bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb)); 2482 2490 return; 2483 2491 } 2484 - bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode); 2492 + bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode); 2485 2493 
kfree_skb(skb); 2486 2494 } 2487 2495 ··· 2598 2600 data->acpi_reset_method = btintel_acpi_reset_method; 2599 2601 } 2600 2602 2603 + #define BTINTEL_ISODATA_HANDLE_BASE 0x900 2604 + 2605 + static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) 2606 + { 2607 + /* 2608 + * Distinguish ISO data packets form ACL data packets 2609 + * based on their connection handle value range. 2610 + */ 2611 + if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) { 2612 + __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); 2613 + 2614 + if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE) 2615 + return HCI_ISODATA_PKT; 2616 + } 2617 + 2618 + return hci_skb_pkt_type(skb); 2619 + } 2620 + 2601 2621 int btintel_bootloader_setup_tlv(struct hci_dev *hdev, 2602 2622 struct intel_version_tlv *ver) 2603 2623 { ··· 2651 2635 return err; 2652 2636 2653 2637 /* If image type returned is BTINTEL_IMG_IML, then controller supports 2654 - * intermediae loader image 2638 + * intermediate loader image 2655 2639 */ 2656 2640 if (ver->img_type == BTINTEL_IMG_IML) { 2657 2641 err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param); ··· 2719 2703 case 0x19: 2720 2704 case 0x1b: 2721 2705 case 0x1c: 2706 + case 0x1d: 2722 2707 case 0x1e: 2723 2708 hci_set_msft_opcode(hdev, 0xFC1E); 2724 2709 break; ··· 2730 2713 } 2731 2714 EXPORT_SYMBOL_GPL(btintel_set_msft_opcode); 2732 2715 2733 - static void btintel_print_fseq_info(struct hci_dev *hdev) 2716 + void btintel_print_fseq_info(struct hci_dev *hdev) 2734 2717 { 2735 2718 struct sk_buff *skb; 2736 2719 u8 *p; ··· 2842 2825 2843 2826 kfree_skb(skb); 2844 2827 } 2828 + EXPORT_SYMBOL_GPL(btintel_print_fseq_info); 2845 2829 2846 2830 static int btintel_setup_combined(struct hci_dev *hdev) 2847 2831 { ··· 3057 3039 err = btintel_bootloader_setup(hdev, &ver); 3058 3040 btintel_register_devcoredump_support(hdev); 3059 3041 break; 3042 + case 0x18: /* GfP2 */ 3043 + case 0x1c: /* GaP */ 3044 + /* Re-classify packet type for controllers 
with LE audio */ 3045 + hdev->classify_pkt_type = btintel_classify_pkt_type; 3046 + fallthrough; 3060 3047 case 0x17: 3061 - case 0x18: 3062 3048 case 0x19: 3063 3049 case 0x1b: 3064 - case 0x1c: 3050 + case 0x1d: 3065 3051 case 0x1e: 3066 3052 /* Display version information of TLV type */ 3067 3053 btintel_version_info_tlv(hdev, &ver_tlv);
+10 -1
drivers/bluetooth/btintel.h
··· 42 42 INTEL_TLV_SBE_TYPE, 43 43 INTEL_TLV_OTP_BDADDR, 44 44 INTEL_TLV_UNLOCKED_STATE, 45 - INTEL_TLV_GIT_SHA1 45 + INTEL_TLV_GIT_SHA1, 46 + INTEL_TLV_FW_ID = 0x50 46 47 }; 47 48 48 49 struct intel_tlv { ··· 57 56 #define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */ 58 57 #define BTINTEL_IMG_IML 0x02 /* Intermediate image */ 59 58 #define BTINTEL_IMG_OP 0x03 /* Operational image */ 59 + 60 + #define BTINTEL_FWID_MAXLEN 64 60 61 61 62 struct intel_version_tlv { 62 63 u32 cnvi_top; ··· 80 77 u8 limited_cce; 81 78 u8 sbe_type; 82 79 u32 git_sha1; 80 + u8 fw_id[BTINTEL_FWID_MAXLEN]; 83 81 bdaddr_t otp_bd_addr; 84 82 }; 85 83 ··· 248 244 struct intel_version_tlv *ver); 249 245 int btintel_shutdown_combined(struct hci_dev *hdev); 250 246 void btintel_hw_error(struct hci_dev *hdev, u8 code); 247 + void btintel_print_fseq_info(struct hci_dev *hdev); 251 248 #else 252 249 253 250 static inline int btintel_check_bdaddr(struct hci_dev *hdev) ··· 376 371 } 377 372 378 373 static inline void btintel_hw_error(struct hci_dev *hdev, u8 code) 374 + { 375 + } 376 + 377 + static inline void btintel_print_fseq_info(struct hci_dev *hdev) 379 378 { 380 379 } 381 380 #endif
+8 -2
drivers/bluetooth/btintel_pcie.c
··· 797 797 kfree(txq->bufs); 798 798 return -ENOMEM; 799 799 } 800 - memset(txq->buf_v_addr, 0, txq->count * BTINTEL_PCIE_BUFFER_SIZE); 801 800 802 801 /* Setup the allocated DMA buffer to bufs. Each data_buf should 803 802 * have virtual address and physical address ··· 841 842 kfree(rxq->bufs); 842 843 return -ENOMEM; 843 844 } 844 - memset(rxq->buf_v_addr, 0, rxq->count * BTINTEL_PCIE_BUFFER_SIZE); 845 845 846 846 /* Setup the allocated DMA buffer to bufs. Each data_buf should 847 847 * have virtual address and physical address ··· 1195 1197 bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", 1196 1198 INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); 1197 1199 err = -EINVAL; 1200 + goto exit_error; 1198 1201 break; 1199 1202 } 1200 1203 1204 + btintel_print_fseq_info(hdev); 1201 1205 exit_error: 1202 1206 kfree_skb(skb); 1203 1207 ··· 1327 1327 data = pci_get_drvdata(pdev); 1328 1328 1329 1329 btintel_pcie_reset_bt(data); 1330 + for (int i = 0; i < data->alloc_vecs; i++) { 1331 + struct msix_entry *msix_entry; 1332 + 1333 + msix_entry = &data->msix_entries[i]; 1334 + free_irq(msix_entry->vector, msix_entry); 1335 + } 1330 1336 1331 1337 pci_free_irq_vectors(pdev); 1332 1338
+1080 -5
drivers/bluetooth/btmtk.c
··· 4 4 */ 5 5 #include <linux/module.h> 6 6 #include <linux/firmware.h> 7 + #include <linux/usb.h> 8 + #include <linux/iopoll.h> 9 + #include <asm/unaligned.h> 7 10 8 11 #include <net/bluetooth/bluetooth.h> 9 12 #include <net/bluetooth/hci_core.h> ··· 21 18 #define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64 22 19 #define MTK_SEC_MAP_COMMON_SIZE 12 23 20 #define MTK_SEC_MAP_NEED_SEND_SIZE 52 21 + 22 + /* It is for mt79xx iso data transmission setting */ 23 + #define MTK_ISO_THRESHOLD 264 24 24 25 25 struct btmtk_patch_header { 26 26 u8 datetime[16]; ··· 70 64 71 65 static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) 72 66 { 73 - struct btmediatek_data *data = hci_get_priv(hdev); 67 + struct btmtk_data *data = hci_get_priv(hdev); 74 68 char buf[80]; 75 69 76 70 snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", ··· 91 85 92 86 static void btmtk_coredump_notify(struct hci_dev *hdev, int state) 93 87 { 94 - struct btmediatek_data *data = hci_get_priv(hdev); 88 + struct btmtk_data *data = hci_get_priv(hdev); 95 89 96 90 switch (state) { 97 91 case HCI_DEVCOREDUMP_IDLE: ··· 108 102 break; 109 103 } 110 104 } 105 + 106 + void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver, 107 + u32 fw_flavor) 108 + { 109 + if (dev_id == 0x7925) 110 + snprintf(buf, size, 111 + "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", 112 + dev_id & 0xffff, dev_id & 0xffff, (fw_ver & 0xff) + 1); 113 + else if (dev_id == 0x7961 && fw_flavor) 114 + snprintf(buf, size, 115 + "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin", 116 + dev_id & 0xffff, (fw_ver & 0xff) + 1); 117 + else 118 + snprintf(buf, size, 119 + "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", 120 + dev_id & 0xffff, (fw_ver & 0xff) + 1); 121 + } 122 + EXPORT_SYMBOL_GPL(btmtk_fw_get_filename); 111 123 112 124 int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, 113 125 wmt_cmd_sync_func_t wmt_cmd_sync) ··· 361 337 362 338 void btmtk_reset_sync(struct hci_dev *hdev) 363 339 { 364 - struct 
btmediatek_data *reset_work = hci_get_priv(hdev); 340 + struct btmtk_data *reset_work = hci_get_priv(hdev); 365 341 int err; 366 342 367 343 hci_dev_lock(hdev); ··· 377 353 int btmtk_register_coredump(struct hci_dev *hdev, const char *name, 378 354 u32 fw_version) 379 355 { 380 - struct btmediatek_data *data = hci_get_priv(hdev); 356 + struct btmtk_data *data = hci_get_priv(hdev); 381 357 382 358 if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) 383 359 return -EOPNOTSUPP; ··· 393 369 394 370 int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) 395 371 { 396 - struct btmediatek_data *data = hci_get_priv(hdev); 372 + struct btmtk_data *data = hci_get_priv(hdev); 397 373 int err; 398 374 399 375 if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) { ··· 436 412 return err; 437 413 } 438 414 EXPORT_SYMBOL_GPL(btmtk_process_coredump); 415 + 416 + static void btmtk_usb_wmt_recv(struct urb *urb) 417 + { 418 + struct hci_dev *hdev = urb->context; 419 + struct btmtk_data *data = hci_get_priv(hdev); 420 + struct sk_buff *skb; 421 + int err; 422 + 423 + if (urb->status == 0 && urb->actual_length > 0) { 424 + hdev->stat.byte_rx += urb->actual_length; 425 + 426 + /* WMT event shouldn't be fragmented and the size should be 427 + * less than HCI_WMT_MAX_EVENT_SIZE. 428 + */ 429 + skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); 430 + if (!skb) { 431 + hdev->stat.err_rx++; 432 + kfree(urb->setup_packet); 433 + return; 434 + } 435 + 436 + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; 437 + skb_put_data(skb, urb->transfer_buffer, urb->actual_length); 438 + 439 + /* When someone waits for the WMT event, the skb is being cloned 440 + * and being processed the events from there then. 
441 + */ 442 + if (test_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags)) { 443 + data->evt_skb = skb_clone(skb, GFP_ATOMIC); 444 + if (!data->evt_skb) { 445 + kfree_skb(skb); 446 + kfree(urb->setup_packet); 447 + return; 448 + } 449 + } 450 + 451 + err = hci_recv_frame(hdev, skb); 452 + if (err < 0) { 453 + kfree_skb(data->evt_skb); 454 + data->evt_skb = NULL; 455 + kfree(urb->setup_packet); 456 + return; 457 + } 458 + 459 + if (test_and_clear_bit(BTMTK_TX_WAIT_VND_EVT, 460 + &data->flags)) { 461 + /* Barrier to sync with other CPUs */ 462 + smp_mb__after_atomic(); 463 + wake_up_bit(&data->flags, 464 + BTMTK_TX_WAIT_VND_EVT); 465 + } 466 + kfree(urb->setup_packet); 467 + return; 468 + } else if (urb->status == -ENOENT) { 469 + /* Avoid suspend failed when usb_kill_urb */ 470 + return; 471 + } 472 + 473 + usb_mark_last_busy(data->udev); 474 + 475 + /* The URB complete handler is still called with urb->actual_length = 0 476 + * when the event is not available, so we should keep re-submitting 477 + * URB until WMT event returns, Also, It's necessary to wait some time 478 + * between the two consecutive control URBs to relax the target device 479 + * to generate the event. Otherwise, the WMT event cannot return from 480 + * the device successfully. 
481 + */ 482 + udelay(500); 483 + 484 + usb_anchor_urb(urb, data->ctrl_anchor); 485 + err = usb_submit_urb(urb, GFP_ATOMIC); 486 + if (err < 0) { 487 + kfree(urb->setup_packet); 488 + /* -EPERM: urb is being killed; 489 + * -ENODEV: device got disconnected 490 + */ 491 + if (err != -EPERM && err != -ENODEV) 492 + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", 493 + urb, -err); 494 + usb_unanchor_urb(urb); 495 + } 496 + } 497 + 498 + static int btmtk_usb_submit_wmt_recv_urb(struct hci_dev *hdev) 499 + { 500 + struct btmtk_data *data = hci_get_priv(hdev); 501 + struct usb_ctrlrequest *dr; 502 + unsigned char *buf; 503 + int err, size = 64; 504 + unsigned int pipe; 505 + struct urb *urb; 506 + 507 + urb = usb_alloc_urb(0, GFP_KERNEL); 508 + if (!urb) 509 + return -ENOMEM; 510 + 511 + dr = kmalloc(sizeof(*dr), GFP_KERNEL); 512 + if (!dr) { 513 + usb_free_urb(urb); 514 + return -ENOMEM; 515 + } 516 + 517 + dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN; 518 + dr->bRequest = 1; 519 + dr->wIndex = cpu_to_le16(0); 520 + dr->wValue = cpu_to_le16(48); 521 + dr->wLength = cpu_to_le16(size); 522 + 523 + buf = kmalloc(size, GFP_KERNEL); 524 + if (!buf) { 525 + kfree(dr); 526 + usb_free_urb(urb); 527 + return -ENOMEM; 528 + } 529 + 530 + pipe = usb_rcvctrlpipe(data->udev, 0); 531 + 532 + usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, 533 + buf, size, btmtk_usb_wmt_recv, hdev); 534 + 535 + urb->transfer_flags |= URB_FREE_BUFFER; 536 + 537 + usb_anchor_urb(urb, data->ctrl_anchor); 538 + err = usb_submit_urb(urb, GFP_KERNEL); 539 + if (err < 0) { 540 + if (err != -EPERM && err != -ENODEV) 541 + bt_dev_err(hdev, "urb %p submission failed (%d)", 542 + urb, -err); 543 + usb_unanchor_urb(urb); 544 + } 545 + 546 + usb_free_urb(urb); 547 + 548 + return err; 549 + } 550 + 551 + static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev, 552 + struct btmtk_hci_wmt_params *wmt_params) 553 + { 554 + struct btmtk_data *data = hci_get_priv(hdev); 555 + struct btmtk_hci_wmt_evt_funcc 
*wmt_evt_funcc; 556 + u32 hlen, status = BTMTK_WMT_INVALID; 557 + struct btmtk_hci_wmt_evt *wmt_evt; 558 + struct btmtk_hci_wmt_cmd *wc; 559 + struct btmtk_wmt_hdr *hdr; 560 + int err; 561 + 562 + /* Send the WMT command and wait until the WMT event returns */ 563 + hlen = sizeof(*hdr) + wmt_params->dlen; 564 + if (hlen > 255) 565 + return -EINVAL; 566 + 567 + wc = kzalloc(hlen, GFP_KERNEL); 568 + if (!wc) 569 + return -ENOMEM; 570 + 571 + hdr = &wc->hdr; 572 + hdr->dir = 1; 573 + hdr->op = wmt_params->op; 574 + hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); 575 + hdr->flag = wmt_params->flag; 576 + memcpy(wc->data, wmt_params->data, wmt_params->dlen); 577 + 578 + set_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); 579 + 580 + /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling, 581 + * it needs constantly polling control pipe until the host received the 582 + * WMT event, thus, we should require to specifically acquire PM counter 583 + * on the USB to prevent the interface from entering auto suspended 584 + * while WMT cmd/event in progress. 585 + */ 586 + err = usb_autopm_get_interface(data->intf); 587 + if (err < 0) 588 + goto err_free_wc; 589 + 590 + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); 591 + 592 + if (err < 0) { 593 + clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); 594 + usb_autopm_put_interface(data->intf); 595 + goto err_free_wc; 596 + } 597 + 598 + /* Submit control IN URB on demand to process the WMT event */ 599 + err = btmtk_usb_submit_wmt_recv_urb(hdev); 600 + 601 + usb_autopm_put_interface(data->intf); 602 + 603 + if (err < 0) 604 + goto err_free_wc; 605 + 606 + /* The vendor specific WMT commands are all answered by a vendor 607 + * specific event and will have the Command Status or Command 608 + * Complete as with usual HCI command flow control. 609 + * 610 + * After sending the command, wait for BTUSB_TX_WAIT_VND_EVT 611 + * state to be cleared. 
The driver specific event receive routine 612 + * will clear that state and with that indicate completion of the 613 + * WMT command. 614 + */ 615 + err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT, 616 + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); 617 + if (err == -EINTR) { 618 + bt_dev_err(hdev, "Execution of wmt command interrupted"); 619 + clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); 620 + goto err_free_wc; 621 + } 622 + 623 + if (err) { 624 + bt_dev_err(hdev, "Execution of wmt command timed out"); 625 + clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); 626 + err = -ETIMEDOUT; 627 + goto err_free_wc; 628 + } 629 + 630 + if (data->evt_skb == NULL) 631 + goto err_free_wc; 632 + 633 + /* Parse and handle the return WMT event */ 634 + wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data; 635 + if (wmt_evt->whdr.op != hdr->op) { 636 + bt_dev_err(hdev, "Wrong op received %d expected %d", 637 + wmt_evt->whdr.op, hdr->op); 638 + err = -EIO; 639 + goto err_free_skb; 640 + } 641 + 642 + switch (wmt_evt->whdr.op) { 643 + case BTMTK_WMT_SEMAPHORE: 644 + if (wmt_evt->whdr.flag == 2) 645 + status = BTMTK_WMT_PATCH_UNDONE; 646 + else 647 + status = BTMTK_WMT_PATCH_DONE; 648 + break; 649 + case BTMTK_WMT_FUNC_CTRL: 650 + wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; 651 + if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) 652 + status = BTMTK_WMT_ON_DONE; 653 + else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) 654 + status = BTMTK_WMT_ON_PROGRESS; 655 + else 656 + status = BTMTK_WMT_ON_UNDONE; 657 + break; 658 + case BTMTK_WMT_PATCH_DWNLD: 659 + if (wmt_evt->whdr.flag == 2) 660 + status = BTMTK_WMT_PATCH_DONE; 661 + else if (wmt_evt->whdr.flag == 1) 662 + status = BTMTK_WMT_PATCH_PROGRESS; 663 + else 664 + status = BTMTK_WMT_PATCH_UNDONE; 665 + break; 666 + } 667 + 668 + if (wmt_params->status) 669 + *wmt_params->status = status; 670 + 671 + err_free_skb: 672 + kfree_skb(data->evt_skb); 673 + data->evt_skb = NULL; 674 + err_free_wc: 675 + 
kfree(wc); 676 + return err; 677 + } 678 + 679 + static int btmtk_usb_func_query(struct hci_dev *hdev) 680 + { 681 + struct btmtk_hci_wmt_params wmt_params; 682 + int status, err; 683 + u8 param = 0; 684 + 685 + /* Query whether the function is enabled */ 686 + wmt_params.op = BTMTK_WMT_FUNC_CTRL; 687 + wmt_params.flag = 4; 688 + wmt_params.dlen = sizeof(param); 689 + wmt_params.data = &param; 690 + wmt_params.status = &status; 691 + 692 + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); 693 + if (err < 0) { 694 + bt_dev_err(hdev, "Failed to query function status (%d)", err); 695 + return err; 696 + } 697 + 698 + return status; 699 + } 700 + 701 + static int btmtk_usb_uhw_reg_write(struct hci_dev *hdev, u32 reg, u32 val) 702 + { 703 + struct btmtk_data *data = hci_get_priv(hdev); 704 + int pipe, err; 705 + void *buf; 706 + 707 + buf = kzalloc(4, GFP_KERNEL); 708 + if (!buf) 709 + return -ENOMEM; 710 + 711 + put_unaligned_le32(val, buf); 712 + 713 + pipe = usb_sndctrlpipe(data->udev, 0); 714 + err = usb_control_msg(data->udev, pipe, 0x02, 715 + 0x5E, 716 + reg >> 16, reg & 0xffff, 717 + buf, 4, USB_CTRL_SET_TIMEOUT); 718 + if (err < 0) 719 + bt_dev_err(hdev, "Failed to write uhw reg(%d)", err); 720 + 721 + kfree(buf); 722 + 723 + return err; 724 + } 725 + 726 + static int btmtk_usb_uhw_reg_read(struct hci_dev *hdev, u32 reg, u32 *val) 727 + { 728 + struct btmtk_data *data = hci_get_priv(hdev); 729 + int pipe, err; 730 + void *buf; 731 + 732 + buf = kzalloc(4, GFP_KERNEL); 733 + if (!buf) 734 + return -ENOMEM; 735 + 736 + pipe = usb_rcvctrlpipe(data->udev, 0); 737 + err = usb_control_msg(data->udev, pipe, 0x01, 738 + 0xDE, 739 + reg >> 16, reg & 0xffff, 740 + buf, 4, USB_CTRL_GET_TIMEOUT); 741 + if (err < 0) { 742 + bt_dev_err(hdev, "Failed to read uhw reg(%d)", err); 743 + goto err_free_buf; 744 + } 745 + 746 + *val = get_unaligned_le32(buf); 747 + bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val); 748 + 749 + err_free_buf: 750 + kfree(buf); 751 + 752 + return 
err; 753 + } 754 + 755 + static int btmtk_usb_reg_read(struct hci_dev *hdev, u32 reg, u32 *val) 756 + { 757 + struct btmtk_data *data = hci_get_priv(hdev); 758 + int pipe, err, size = sizeof(u32); 759 + void *buf; 760 + 761 + buf = kzalloc(size, GFP_KERNEL); 762 + if (!buf) 763 + return -ENOMEM; 764 + 765 + pipe = usb_rcvctrlpipe(data->udev, 0); 766 + err = usb_control_msg(data->udev, pipe, 0x63, 767 + USB_TYPE_VENDOR | USB_DIR_IN, 768 + reg >> 16, reg & 0xffff, 769 + buf, size, USB_CTRL_GET_TIMEOUT); 770 + if (err < 0) 771 + goto err_free_buf; 772 + 773 + *val = get_unaligned_le32(buf); 774 + 775 + err_free_buf: 776 + kfree(buf); 777 + 778 + return err; 779 + } 780 + 781 + static int btmtk_usb_id_get(struct hci_dev *hdev, u32 reg, u32 *id) 782 + { 783 + return btmtk_usb_reg_read(hdev, reg, id); 784 + } 785 + 786 + static u32 btmtk_usb_reset_done(struct hci_dev *hdev) 787 + { 788 + u32 val = 0; 789 + 790 + btmtk_usb_uhw_reg_read(hdev, MTK_BT_MISC, &val); 791 + 792 + return val & MTK_BT_RST_DONE; 793 + } 794 + 795 + int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id) 796 + { 797 + u32 val; 798 + int err; 799 + 800 + if (dev_id == 0x7922) { 801 + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val); 802 + if (err < 0) 803 + return err; 804 + val |= 0x00002020; 805 + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val); 806 + if (err < 0) 807 + return err; 808 + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001); 809 + if (err < 0) 810 + return err; 811 + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val); 812 + if (err < 0) 813 + return err; 814 + val |= BIT(0); 815 + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val); 816 + if (err < 0) 817 + return err; 818 + msleep(100); 819 + } else if (dev_id == 0x7925) { 820 + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val); 821 + if (err < 0) 822 + return err; 823 + val |= (1 << 5); 824 + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val); 
825 + if (err < 0) 826 + return err; 827 + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val); 828 + if (err < 0) 829 + return err; 830 + val &= 0xFFFF00FF; 831 + val |= (1 << 13); 832 + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val); 833 + if (err < 0) 834 + return err; 835 + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001); 836 + if (err < 0) 837 + return err; 838 + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val); 839 + if (err < 0) 840 + return err; 841 + val |= (1 << 0); 842 + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val); 843 + if (err < 0) 844 + return err; 845 + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF); 846 + if (err < 0) 847 + return err; 848 + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val); 849 + if (err < 0) 850 + return err; 851 + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF); 852 + if (err < 0) 853 + return err; 854 + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val); 855 + if (err < 0) 856 + return err; 857 + msleep(100); 858 + } else { 859 + /* It's Device EndPoint Reset Option Register */ 860 + bt_dev_dbg(hdev, "Initiating reset mechanism via uhw"); 861 + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); 862 + if (err < 0) 863 + return err; 864 + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_WDT_STATUS, &val); 865 + if (err < 0) 866 + return err; 867 + /* Reset the bluetooth chip via USB interface. 
*/ 868 + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 1); 869 + if (err < 0) 870 + return err; 871 + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF); 872 + if (err < 0) 873 + return err; 874 + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val); 875 + if (err < 0) 876 + return err; 877 + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF); 878 + if (err < 0) 879 + return err; 880 + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val); 881 + if (err < 0) 882 + return err; 883 + /* MT7921 need to delay 20ms between toggle reset bit */ 884 + msleep(20); 885 + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 0); 886 + if (err < 0) 887 + return err; 888 + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val); 889 + if (err < 0) 890 + return err; 891 + } 892 + 893 + err = readx_poll_timeout(btmtk_usb_reset_done, hdev, val, 894 + val & MTK_BT_RST_DONE, 20000, 1000000); 895 + if (err < 0) 896 + bt_dev_err(hdev, "Reset timeout"); 897 + 898 + if (dev_id == 0x7922) { 899 + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF); 900 + if (err < 0) 901 + return err; 902 + } 903 + 904 + err = btmtk_usb_id_get(hdev, 0x70010200, &val); 905 + if (err < 0 || !val) 906 + bt_dev_err(hdev, "Can't get device id, subsys reset fail."); 907 + 908 + return err; 909 + } 910 + EXPORT_SYMBOL_GPL(btmtk_usb_subsys_reset); 911 + 912 + int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb) 913 + { 914 + struct btmtk_data *data = hci_get_priv(hdev); 915 + u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); 916 + 917 + switch (handle) { 918 + case 0xfc6f: /* Firmware dump from device */ 919 + /* When the firmware hangs, the device can no longer 920 + * suspend and thus disable auto-suspend. 
921 + */ 922 + usb_disable_autosuspend(data->udev); 923 + 924 + /* We need to forward the diagnostic packet to userspace daemon 925 + * for backward compatibility, so we have to clone the packet 926 + * extraly for the in-kernel coredump support. 927 + */ 928 + if (IS_ENABLED(CONFIG_DEV_COREDUMP)) { 929 + struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC); 930 + 931 + if (skb_cd) 932 + btmtk_process_coredump(hdev, skb_cd); 933 + } 934 + 935 + fallthrough; 936 + case 0x05ff: /* Firmware debug logging 1 */ 937 + case 0x05fe: /* Firmware debug logging 2 */ 938 + return hci_recv_diag(hdev, skb); 939 + } 940 + 941 + return hci_recv_frame(hdev, skb); 942 + } 943 + EXPORT_SYMBOL_GPL(btmtk_usb_recv_acl); 944 + 945 + static int btmtk_isopkt_pad(struct hci_dev *hdev, struct sk_buff *skb) 946 + { 947 + if (skb->len > MTK_ISO_THRESHOLD) 948 + return -EINVAL; 949 + 950 + if (skb_pad(skb, MTK_ISO_THRESHOLD - skb->len)) 951 + return -ENOMEM; 952 + 953 + __skb_put(skb, MTK_ISO_THRESHOLD - skb->len); 954 + 955 + return 0; 956 + } 957 + 958 + static int __set_mtk_intr_interface(struct hci_dev *hdev) 959 + { 960 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 961 + struct usb_interface *intf = btmtk_data->isopkt_intf; 962 + int i, err; 963 + 964 + if (!btmtk_data->isopkt_intf) 965 + return -ENODEV; 966 + 967 + err = usb_set_interface(btmtk_data->udev, MTK_ISO_IFNUM, 1); 968 + if (err < 0) { 969 + bt_dev_err(hdev, "setting interface failed (%d)", -err); 970 + return err; 971 + } 972 + 973 + btmtk_data->isopkt_tx_ep = NULL; 974 + btmtk_data->isopkt_rx_ep = NULL; 975 + 976 + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { 977 + struct usb_endpoint_descriptor *ep_desc; 978 + 979 + ep_desc = &intf->cur_altsetting->endpoint[i].desc; 980 + 981 + if (!btmtk_data->isopkt_tx_ep && 982 + usb_endpoint_is_int_out(ep_desc)) { 983 + btmtk_data->isopkt_tx_ep = ep_desc; 984 + continue; 985 + } 986 + 987 + if (!btmtk_data->isopkt_rx_ep && 988 + usb_endpoint_is_int_in(ep_desc)) 
{ 989 + btmtk_data->isopkt_rx_ep = ep_desc; 990 + continue; 991 + } 992 + } 993 + 994 + if (!btmtk_data->isopkt_tx_ep || 995 + !btmtk_data->isopkt_rx_ep) { 996 + bt_dev_err(hdev, "invalid interrupt descriptors"); 997 + return -ENODEV; 998 + } 999 + 1000 + return 0; 1001 + } 1002 + 1003 + struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb, 1004 + usb_complete_t tx_complete) 1005 + { 1006 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1007 + struct urb *urb; 1008 + unsigned int pipe; 1009 + 1010 + if (!btmtk_data->isopkt_tx_ep) 1011 + return ERR_PTR(-ENODEV); 1012 + 1013 + urb = usb_alloc_urb(0, GFP_KERNEL); 1014 + if (!urb) 1015 + return ERR_PTR(-ENOMEM); 1016 + 1017 + if (btmtk_isopkt_pad(hdev, skb)) 1018 + return ERR_PTR(-EINVAL); 1019 + 1020 + pipe = usb_sndintpipe(btmtk_data->udev, 1021 + btmtk_data->isopkt_tx_ep->bEndpointAddress); 1022 + 1023 + usb_fill_int_urb(urb, btmtk_data->udev, pipe, 1024 + skb->data, skb->len, tx_complete, 1025 + skb, btmtk_data->isopkt_tx_ep->bInterval); 1026 + 1027 + skb->dev = (void *)hdev; 1028 + 1029 + return urb; 1030 + } 1031 + EXPORT_SYMBOL_GPL(alloc_mtk_intr_urb); 1032 + 1033 + static int btmtk_recv_isopkt(struct hci_dev *hdev, void *buffer, int count) 1034 + { 1035 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1036 + struct sk_buff *skb; 1037 + unsigned long flags; 1038 + int err = 0; 1039 + 1040 + spin_lock_irqsave(&btmtk_data->isorxlock, flags); 1041 + skb = btmtk_data->isopkt_skb; 1042 + 1043 + while (count) { 1044 + int len; 1045 + 1046 + if (!skb) { 1047 + skb = bt_skb_alloc(HCI_MAX_ISO_SIZE, GFP_ATOMIC); 1048 + if (!skb) { 1049 + err = -ENOMEM; 1050 + break; 1051 + } 1052 + 1053 + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 1054 + hci_skb_expect(skb) = HCI_ISO_HDR_SIZE; 1055 + } 1056 + 1057 + len = min_t(uint, hci_skb_expect(skb), count); 1058 + skb_put_data(skb, buffer, len); 1059 + 1060 + count -= len; 1061 + buffer += len; 1062 + hci_skb_expect(skb) -= len; 1063 + 1064 + if (skb->len 
== HCI_ISO_HDR_SIZE) { 1065 + __le16 dlen = ((struct hci_iso_hdr *)skb->data)->dlen; 1066 + 1067 + /* Complete ISO header */ 1068 + hci_skb_expect(skb) = __le16_to_cpu(dlen); 1069 + 1070 + if (skb_tailroom(skb) < hci_skb_expect(skb)) { 1071 + kfree_skb(skb); 1072 + skb = NULL; 1073 + 1074 + err = -EILSEQ; 1075 + break; 1076 + } 1077 + } 1078 + 1079 + if (!hci_skb_expect(skb)) { 1080 + /* Complete frame */ 1081 + hci_recv_frame(hdev, skb); 1082 + skb = NULL; 1083 + } 1084 + } 1085 + 1086 + btmtk_data->isopkt_skb = skb; 1087 + spin_unlock_irqrestore(&btmtk_data->isorxlock, flags); 1088 + 1089 + return err; 1090 + } 1091 + 1092 + static void btmtk_intr_complete(struct urb *urb) 1093 + { 1094 + struct hci_dev *hdev = urb->context; 1095 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1096 + int err; 1097 + 1098 + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, 1099 + urb->actual_length); 1100 + 1101 + if (!test_bit(HCI_RUNNING, &hdev->flags)) 1102 + return; 1103 + 1104 + if (hdev->suspended) 1105 + return; 1106 + 1107 + if (urb->status == 0) { 1108 + hdev->stat.byte_rx += urb->actual_length; 1109 + 1110 + if (btmtk_recv_isopkt(hdev, urb->transfer_buffer, 1111 + urb->actual_length) < 0) { 1112 + bt_dev_err(hdev, "corrupted iso packet"); 1113 + hdev->stat.err_rx++; 1114 + } 1115 + } else if (urb->status == -ENOENT) { 1116 + /* Avoid suspend failed when usb_kill_urb */ 1117 + return; 1118 + } 1119 + 1120 + usb_mark_last_busy(btmtk_data->udev); 1121 + usb_anchor_urb(urb, &btmtk_data->isopkt_anchor); 1122 + 1123 + err = usb_submit_urb(urb, GFP_ATOMIC); 1124 + if (err < 0) { 1125 + /* -EPERM: urb is being killed; 1126 + * -ENODEV: device got disconnected 1127 + */ 1128 + if (err != -EPERM && err != -ENODEV) 1129 + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", 1130 + urb, -err); 1131 + if (err != -EPERM) 1132 + hci_cmd_sync_cancel(hdev, -err); 1133 + usb_unanchor_urb(urb); 1134 + } 1135 + } 1136 + 1137 + static int btmtk_submit_intr_urb(struct 
hci_dev *hdev, gfp_t mem_flags) 1138 + { 1139 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1140 + unsigned char *buf; 1141 + unsigned int pipe; 1142 + struct urb *urb; 1143 + int err, size; 1144 + 1145 + BT_DBG("%s", hdev->name); 1146 + 1147 + if (!btmtk_data->isopkt_rx_ep) 1148 + return -ENODEV; 1149 + 1150 + urb = usb_alloc_urb(0, mem_flags); 1151 + if (!urb) 1152 + return -ENOMEM; 1153 + size = le16_to_cpu(btmtk_data->isopkt_rx_ep->wMaxPacketSize); 1154 + 1155 + buf = kmalloc(size, mem_flags); 1156 + if (!buf) { 1157 + usb_free_urb(urb); 1158 + return -ENOMEM; 1159 + } 1160 + 1161 + pipe = usb_rcvintpipe(btmtk_data->udev, 1162 + btmtk_data->isopkt_rx_ep->bEndpointAddress); 1163 + 1164 + usb_fill_int_urb(urb, btmtk_data->udev, pipe, buf, size, 1165 + btmtk_intr_complete, hdev, 1166 + btmtk_data->isopkt_rx_ep->bInterval); 1167 + 1168 + urb->transfer_flags |= URB_FREE_BUFFER; 1169 + 1170 + usb_mark_last_busy(btmtk_data->udev); 1171 + usb_anchor_urb(urb, &btmtk_data->isopkt_anchor); 1172 + 1173 + err = usb_submit_urb(urb, mem_flags); 1174 + if (err < 0) { 1175 + if (err != -EPERM && err != -ENODEV) 1176 + bt_dev_err(hdev, "urb %p submission failed (%d)", 1177 + urb, -err); 1178 + usb_unanchor_urb(urb); 1179 + } 1180 + 1181 + usb_free_urb(urb); 1182 + 1183 + return err; 1184 + } 1185 + 1186 + static int btmtk_usb_isointf_init(struct hci_dev *hdev) 1187 + { 1188 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1189 + u8 iso_param[2] = { 0x08, 0x01 }; 1190 + struct sk_buff *skb; 1191 + int err; 1192 + 1193 + init_usb_anchor(&btmtk_data->isopkt_anchor); 1194 + spin_lock_init(&btmtk_data->isorxlock); 1195 + 1196 + __set_mtk_intr_interface(hdev); 1197 + 1198 + err = btmtk_submit_intr_urb(hdev, GFP_KERNEL); 1199 + if (err < 0) { 1200 + usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); 1201 + bt_dev_err(hdev, "ISO intf not support (%d)", err); 1202 + return err; 1203 + } 1204 + 1205 + skb = __hci_cmd_sync(hdev, 0xfd98, sizeof(iso_param), iso_param, 1206 + 
HCI_INIT_TIMEOUT); 1207 + if (IS_ERR(skb)) { 1208 + bt_dev_err(hdev, "Failed to apply iso setting (%ld)", PTR_ERR(skb)); 1209 + return PTR_ERR(skb); 1210 + } 1211 + kfree_skb(skb); 1212 + 1213 + return 0; 1214 + } 1215 + 1216 + int btmtk_usb_resume(struct hci_dev *hdev) 1217 + { 1218 + /* This function describes the specific additional steps taken by MediaTek 1219 + * when Bluetooth usb driver's resume function is called. 1220 + */ 1221 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1222 + 1223 + /* Resubmit urb for iso data transmission */ 1224 + if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) { 1225 + if (btmtk_submit_intr_urb(hdev, GFP_NOIO) < 0) 1226 + clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags); 1227 + } 1228 + 1229 + return 0; 1230 + } 1231 + EXPORT_SYMBOL_GPL(btmtk_usb_resume); 1232 + 1233 + int btmtk_usb_suspend(struct hci_dev *hdev) 1234 + { 1235 + /* This function describes the specific additional steps taken by MediaTek 1236 + * when Bluetooth usb driver's suspend function is called. 
1237 + */ 1238 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1239 + 1240 + /* Stop urb anchor for iso data transmission */ 1241 + usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); 1242 + 1243 + return 0; 1244 + } 1245 + EXPORT_SYMBOL_GPL(btmtk_usb_suspend); 1246 + 1247 + int btmtk_usb_setup(struct hci_dev *hdev) 1248 + { 1249 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 1250 + struct btmtk_hci_wmt_params wmt_params; 1251 + ktime_t calltime, delta, rettime; 1252 + struct btmtk_tci_sleep tci_sleep; 1253 + unsigned long long duration; 1254 + struct sk_buff *skb; 1255 + const char *fwname; 1256 + int err, status; 1257 + u32 dev_id = 0; 1258 + char fw_bin_name[64]; 1259 + u32 fw_version = 0, fw_flavor = 0; 1260 + u8 param; 1261 + 1262 + calltime = ktime_get(); 1263 + 1264 + err = btmtk_usb_id_get(hdev, 0x80000008, &dev_id); 1265 + if (err < 0) { 1266 + bt_dev_err(hdev, "Failed to get device id (%d)", err); 1267 + return err; 1268 + } 1269 + 1270 + if (!dev_id || dev_id != 0x7663) { 1271 + err = btmtk_usb_id_get(hdev, 0x70010200, &dev_id); 1272 + if (err < 0) { 1273 + bt_dev_err(hdev, "Failed to get device id (%d)", err); 1274 + return err; 1275 + } 1276 + err = btmtk_usb_id_get(hdev, 0x80021004, &fw_version); 1277 + if (err < 0) { 1278 + bt_dev_err(hdev, "Failed to get fw version (%d)", err); 1279 + return err; 1280 + } 1281 + err = btmtk_usb_id_get(hdev, 0x70010020, &fw_flavor); 1282 + if (err < 0) { 1283 + bt_dev_err(hdev, "Failed to get fw flavor (%d)", err); 1284 + return err; 1285 + } 1286 + fw_flavor = (fw_flavor & 0x00000080) >> 7; 1287 + } 1288 + 1289 + btmtk_data->dev_id = dev_id; 1290 + 1291 + err = btmtk_register_coredump(hdev, btmtk_data->drv_name, fw_version); 1292 + if (err < 0) 1293 + bt_dev_err(hdev, "Failed to register coredump (%d)", err); 1294 + 1295 + switch (dev_id) { 1296 + case 0x7663: 1297 + fwname = FIRMWARE_MT7663; 1298 + break; 1299 + case 0x7668: 1300 + fwname = FIRMWARE_MT7668; 1301 + break; 1302 + case 0x7922: 1303 + case 
0x7961: 1304 + case 0x7925: 1305 + /* Reset the device to ensure it's in the initial state before 1306 + * downloading the firmware to ensure. 1307 + */ 1308 + 1309 + if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags)) 1310 + btmtk_usb_subsys_reset(hdev, dev_id); 1311 + 1312 + btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id, 1313 + fw_version, fw_flavor); 1314 + 1315 + err = btmtk_setup_firmware_79xx(hdev, fw_bin_name, 1316 + btmtk_usb_hci_wmt_sync); 1317 + if (err < 0) { 1318 + bt_dev_err(hdev, "Failed to set up firmware (%d)", err); 1319 + clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags); 1320 + return err; 1321 + } 1322 + 1323 + set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags); 1324 + 1325 + /* It's Device EndPoint Reset Option Register */ 1326 + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 1327 + MTK_EP_RST_IN_OUT_OPT); 1328 + if (err < 0) 1329 + return err; 1330 + 1331 + /* Enable Bluetooth protocol */ 1332 + param = 1; 1333 + wmt_params.op = BTMTK_WMT_FUNC_CTRL; 1334 + wmt_params.flag = 0; 1335 + wmt_params.dlen = sizeof(param); 1336 + wmt_params.data = &param; 1337 + wmt_params.status = NULL; 1338 + 1339 + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); 1340 + if (err < 0) { 1341 + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); 1342 + return err; 1343 + } 1344 + 1345 + hci_set_msft_opcode(hdev, 0xFD30); 1346 + hci_set_aosp_capable(hdev); 1347 + 1348 + /* Set up ISO interface after protocol enabled */ 1349 + if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) { 1350 + if (!btmtk_usb_isointf_init(hdev)) 1351 + set_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags); 1352 + } 1353 + 1354 + goto done; 1355 + default: 1356 + bt_dev_err(hdev, "Unsupported hardware variant (%08x)", 1357 + dev_id); 1358 + return -ENODEV; 1359 + } 1360 + 1361 + /* Query whether the firmware is already download */ 1362 + wmt_params.op = BTMTK_WMT_SEMAPHORE; 1363 + wmt_params.flag = 1; 1364 + wmt_params.dlen = 0; 1365 + wmt_params.data = 
NULL; 1366 + wmt_params.status = &status; 1367 + 1368 + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); 1369 + if (err < 0) { 1370 + bt_dev_err(hdev, "Failed to query firmware status (%d)", err); 1371 + return err; 1372 + } 1373 + 1374 + if (status == BTMTK_WMT_PATCH_DONE) { 1375 + bt_dev_info(hdev, "firmware already downloaded"); 1376 + goto ignore_setup_fw; 1377 + } 1378 + 1379 + /* Setup a firmware which the device definitely requires */ 1380 + err = btmtk_setup_firmware(hdev, fwname, 1381 + btmtk_usb_hci_wmt_sync); 1382 + if (err < 0) 1383 + return err; 1384 + 1385 + ignore_setup_fw: 1386 + err = readx_poll_timeout(btmtk_usb_func_query, hdev, status, 1387 + status < 0 || status != BTMTK_WMT_ON_PROGRESS, 1388 + 2000, 5000000); 1389 + /* -ETIMEDOUT happens */ 1390 + if (err < 0) 1391 + return err; 1392 + 1393 + /* The other errors happen in btmtk_usb_func_query */ 1394 + if (status < 0) 1395 + return status; 1396 + 1397 + if (status == BTMTK_WMT_ON_DONE) { 1398 + bt_dev_info(hdev, "function already on"); 1399 + goto ignore_func_on; 1400 + } 1401 + 1402 + /* Enable Bluetooth protocol */ 1403 + param = 1; 1404 + wmt_params.op = BTMTK_WMT_FUNC_CTRL; 1405 + wmt_params.flag = 0; 1406 + wmt_params.dlen = sizeof(param); 1407 + wmt_params.data = &param; 1408 + wmt_params.status = NULL; 1409 + 1410 + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); 1411 + if (err < 0) { 1412 + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); 1413 + return err; 1414 + } 1415 + 1416 + ignore_func_on: 1417 + /* Apply the low power environment setup */ 1418 + tci_sleep.mode = 0x5; 1419 + tci_sleep.duration = cpu_to_le16(0x640); 1420 + tci_sleep.host_duration = cpu_to_le16(0x640); 1421 + tci_sleep.host_wakeup_pin = 0; 1422 + tci_sleep.time_compensation = 0; 1423 + 1424 + skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, 1425 + HCI_INIT_TIMEOUT); 1426 + if (IS_ERR(skb)) { 1427 + err = PTR_ERR(skb); 1428 + bt_dev_err(hdev, "Failed to apply low power setting (%d)", 
err); 1429 + return err; 1430 + } 1431 + kfree_skb(skb); 1432 + 1433 + done: 1434 + rettime = ktime_get(); 1435 + delta = ktime_sub(rettime, calltime); 1436 + duration = (unsigned long long)ktime_to_ns(delta) >> 10; 1437 + 1438 + bt_dev_info(hdev, "Device setup in %llu usecs", duration); 1439 + 1440 + return 0; 1441 + } 1442 + EXPORT_SYMBOL_GPL(btmtk_usb_setup); 1443 + 1444 + int btmtk_usb_shutdown(struct hci_dev *hdev) 1445 + { 1446 + struct btmtk_hci_wmt_params wmt_params; 1447 + u8 param = 0; 1448 + int err; 1449 + 1450 + /* Disable the device */ 1451 + wmt_params.op = BTMTK_WMT_FUNC_CTRL; 1452 + wmt_params.flag = 0; 1453 + wmt_params.dlen = sizeof(param); 1454 + wmt_params.data = &param; 1455 + wmt_params.status = NULL; 1456 + 1457 + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); 1458 + if (err < 0) { 1459 + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); 1460 + return err; 1461 + } 1462 + 1463 + return 0; 1464 + } 1465 + EXPORT_SYMBOL_GPL(btmtk_usb_shutdown); 439 1466 440 1467 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 441 1468 MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
+109 -9
drivers/bluetooth/btmtk.h
··· 28 28 #define MTK_COREDUMP_END_LEN (sizeof(MTK_COREDUMP_END)) 29 29 #define MTK_COREDUMP_NUM 255 30 30 31 + /* UHW CR mapping */ 32 + #define MTK_BT_MISC 0x70002510 33 + #define MTK_BT_SUBSYS_RST 0x70002610 34 + #define MTK_UDMA_INT_STA_BT 0x74000024 35 + #define MTK_UDMA_INT_STA_BT1 0x74000308 36 + #define MTK_BT_WDT_STATUS 0x740003A0 37 + #define MTK_EP_RST_OPT 0x74011890 38 + #define MTK_EP_RST_IN_OUT_OPT 0x00010001 39 + #define MTK_BT_RST_DONE 0x00000100 40 + #define MTK_BT_RESET_REG_CONNV3 0x70028610 41 + #define MTK_BT_READ_DEV_ID 0x70010200 42 + 43 + /* MediaTek ISO Interface */ 44 + #define MTK_ISO_IFNUM 2 45 + 31 46 enum { 32 47 BTMTK_WMT_PATCH_DWNLD = 0x1, 33 48 BTMTK_WMT_TEST = 0x2, ··· 141 126 u32 *status; 142 127 }; 143 128 129 + enum { 130 + BTMTK_TX_WAIT_VND_EVT, 131 + BTMTK_FIRMWARE_LOADED, 132 + BTMTK_HW_RESET_ACTIVE, 133 + BTMTK_ISOPKT_OVER_INTR, 134 + BTMTK_ISOPKT_RUNNING, 135 + }; 136 + 144 137 typedef int (*btmtk_reset_sync_func_t)(struct hci_dev *, void *); 145 138 146 139 struct btmtk_coredump_info { ··· 158 135 int state; 159 136 }; 160 137 161 - struct btmediatek_data { 138 + struct btmtk_data { 139 + const char *drv_name; 140 + unsigned long flags; 162 141 u32 dev_id; 163 142 btmtk_reset_sync_func_t reset_sync; 164 143 struct btmtk_coredump_info cd_info; 144 + 145 + struct usb_device *udev; 146 + struct usb_interface *intf; 147 + struct usb_anchor *ctrl_anchor; 148 + struct sk_buff *evt_skb; 149 + struct usb_endpoint_descriptor *isopkt_tx_ep; 150 + struct usb_endpoint_descriptor *isopkt_rx_ep; 151 + struct usb_interface *isopkt_intf; 152 + struct usb_anchor isopkt_anchor; 153 + struct sk_buff *isopkt_skb; 154 + 155 + /* spinlock for ISO data transmission */ 156 + spinlock_t isorxlock; 165 157 }; 166 158 167 159 typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *, ··· 198 160 u32 fw_version); 199 161 200 162 int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb); 163 + 164 + void btmtk_fw_get_filename(char *buf, size_t 
size, u32 dev_id, u32 fw_ver, 165 + u32 fw_flavor); 166 + 167 + int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id); 168 + 169 + int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb); 170 + 171 + struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb, 172 + usb_complete_t tx_complete); 173 + 174 + int btmtk_usb_resume(struct hci_dev *hdev); 175 + 176 + int btmtk_usb_suspend(struct hci_dev *hdev); 177 + 178 + int btmtk_usb_setup(struct hci_dev *hdev); 179 + 180 + int btmtk_usb_shutdown(struct hci_dev *hdev); 201 181 #else 202 182 203 183 static inline int btmtk_set_bdaddr(struct hci_dev *hdev, ··· 224 168 return -EOPNOTSUPP; 225 169 } 226 170 227 - static int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, 228 - wmt_cmd_sync_func_t wmt_cmd_sync) 171 + static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev, 172 + const char *fwname, 173 + wmt_cmd_sync_func_t wmt_cmd_sync) 229 174 { 230 175 return -EOPNOTSUPP; 231 176 } 232 177 233 - static int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, 234 - wmt_cmd_sync_func_t wmt_cmd_sync) 178 + static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, 179 + wmt_cmd_sync_func_t wmt_cmd_sync) 235 180 { 236 181 return -EOPNOTSUPP; 237 182 } 238 183 239 - static void btmtk_reset_sync(struct hci_dev *hdev) 184 + static inline void btmtk_reset_sync(struct hci_dev *hdev) 240 185 { 241 186 } 242 187 243 - static int btmtk_register_coredump(struct hci_dev *hdev, const char *name, 244 - u32 fw_version) 188 + static inline int btmtk_register_coredump(struct hci_dev *hdev, 189 + const char *name, u32 fw_version) 245 190 { 246 191 return -EOPNOTSUPP; 247 192 } 248 193 249 - static int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) 194 + static inline int btmtk_process_coredump(struct hci_dev *hdev, 195 + struct sk_buff *skb) 196 + { 197 + return -EOPNOTSUPP; 198 + } 199 + 200 + static inline void 
btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, 201 + u32 fw_ver, u32 fw_flavor) 202 + { 203 + } 204 + 205 + static inline int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id) 206 + { 207 + return -EOPNOTSUPP; 208 + } 209 + 210 + static inline int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb) 211 + { 212 + return -EOPNOTSUPP; 213 + } 214 + 215 + static inline struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, 216 + struct sk_buff *skb, 217 + usb_complete_t tx_complete) 218 + { 219 + return ERR_PTR(-EOPNOTSUPP); 220 + } 221 + 222 + static inline int btmtk_usb_resume(struct hci_dev *hdev) 223 + { 224 + return -EOPNOTSUPP; 225 + } 226 + 227 + static inline int btmtk_usb_suspend(struct hci_dev *hdev) 228 + { 229 + return -EOPNOTSUPP; 230 + } 231 + 232 + static inline int btmtk_usb_setup(struct hci_dev *hdev) 233 + { 234 + return -EOPNOTSUPP; 235 + } 236 + 237 + static inline int btmtk_usb_shutdown(struct hci_dev *hdev) 250 238 { 251 239 return -EOPNOTSUPP; 252 240 }
+4
drivers/bluetooth/btmtksdio.c
··· 20 20 #include <linux/of.h> 21 21 #include <linux/pm_runtime.h> 22 22 #include <linux/skbuff.h> 23 + #include <linux/usb.h> 23 24 24 25 #include <linux/mmc/host.h> 25 26 #include <linux/mmc/sdio_ids.h> ··· 1117 1116 bt_dev_err(hdev, "Failed to get fw version (%d)", err); 1118 1117 return err; 1119 1118 } 1119 + 1120 + btmtk_fw_get_filename(fwname, sizeof(fwname), dev_id, 1121 + fw_version, 0); 1120 1122 1121 1123 snprintf(fwname, sizeof(fwname), 1122 1124 "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+1
drivers/bluetooth/btmtkuart.c
··· 22 22 #include <linux/regulator/consumer.h> 23 23 #include <linux/serdev.h> 24 24 #include <linux/skbuff.h> 25 + #include <linux/usb.h> 25 26 26 27 #include <net/bluetooth/bluetooth.h> 27 28 #include <net/bluetooth/hci_core.h>
+196 -46
drivers/bluetooth/btnxpuart.c
··· 29 29 #define BTNXPUART_CHECK_BOOT_SIGNATURE 3 30 30 #define BTNXPUART_SERDEV_OPEN 4 31 31 #define BTNXPUART_IR_IN_PROGRESS 5 32 + #define BTNXPUART_FW_DOWNLOAD_ABORT 6 32 33 33 34 /* NXP HW err codes */ 34 35 #define BTNXPUART_IR_HW_ERR 0xb0 35 36 36 - #define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin" 37 - #define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin" 38 - #define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin" 39 - #define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin" 40 - #define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se" 41 - #define FIRMWARE_IW624 "nxp/uartiw624_bt.bin" 42 - #define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se" 43 - #define FIRMWARE_AW693 "nxp/uartaw693_bt.bin" 44 - #define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se" 45 - #define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin" 37 + #define FIRMWARE_W8987 "uart8987_bt_v0.bin" 38 + #define FIRMWARE_W8987_OLD "uartuart8987_bt.bin" 39 + #define FIRMWARE_W8997 "uart8997_bt_v4.bin" 40 + #define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin" 41 + #define FIRMWARE_W9098 "uart9098_bt_v1.bin" 42 + #define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin" 43 + #define FIRMWARE_IW416 "uartiw416_bt_v0.bin" 44 + #define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se" 45 + #define FIRMWARE_IW615 "uartspi_iw610_v0.bin" 46 + #define FIRMWARE_SECURE_IW615 "uartspi_iw610_v0.bin.se" 47 + #define FIRMWARE_IW624 "uartiw624_bt.bin" 48 + #define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se" 49 + #define FIRMWARE_AW693 "uartaw693_bt.bin" 50 + #define FIRMWARE_SECURE_AW693 "uartaw693_bt.bin.se" 51 + #define FIRMWARE_AW693_A1 "uartaw693_bt_v1.bin" 52 + #define FIRMWARE_SECURE_AW693_A1 "uartaw693_bt_v1.bin.se" 53 + #define FIRMWARE_HELPER "helper_uart_3000000.bin" 46 54 47 55 #define CHIP_ID_W9098 0x5c03 48 56 #define CHIP_ID_IW416 0x7201 49 57 #define CHIP_ID_IW612 0x7601 50 58 #define CHIP_ID_IW624a 0x8000 51 59 #define CHIP_ID_IW624c 0x8001 52 - #define CHIP_ID_AW693 0x8200 60 + #define CHIP_ID_AW693a0 0x8200 61 + #define 
CHIP_ID_AW693a1 0x8201 62 + #define CHIP_ID_IW615a0 0x8800 63 + #define CHIP_ID_IW615a1 0x8801 53 64 54 65 #define FW_SECURE_MASK 0xc0 55 66 #define FW_OPEN 0x00 ··· 155 144 struct btnxpuart_data { 156 145 const char *helper_fw_name; 157 146 const char *fw_name; 147 + const char *fw_name_old; 158 148 }; 159 149 160 150 struct btnxpuart_dev { ··· 171 159 u8 fw_name[MAX_FW_FILE_NAME_LEN]; 172 160 u32 fw_dnld_v1_offset; 173 161 u32 fw_v1_sent_bytes; 162 + u32 fw_dnld_v3_offset; 174 163 u32 fw_v3_offset_correction; 175 164 u32 fw_v1_expected_len; 176 165 u32 boot_reg_offset; ··· 199 186 #define NXP_ACK_V3 0x7a 200 187 #define NXP_NAK_V3 0x7b 201 188 #define NXP_CRC_ERROR_V3 0x7c 189 + 190 + /* Bootloader signature error codes */ 191 + #define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */ 192 + #define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */ 193 + #define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */ 202 194 203 195 #define HDR_LEN 16 204 196 ··· 295 277 __be32 crc; 296 278 } __packed; 297 279 280 + struct nxp_v3_rx_timeout_nak { 281 + u8 nak; 282 + __le32 offset; 283 + u8 crc; 284 + } __packed; 285 + 286 + union nxp_v3_rx_timeout_nak_u { 287 + struct nxp_v3_rx_timeout_nak pkt; 288 + u8 buf[6]; 289 + }; 290 + 298 291 static u8 crc8_table[CRC8_TABLE_SIZE]; 299 292 300 293 /* Default configurations */ ··· 357 328 struct ps_data *psdata = &nxpdev->psdata; 358 329 359 330 flush_work(&psdata->work); 360 - del_timer_sync(&psdata->ps_timer); 331 + timer_shutdown_sync(&psdata->ps_timer); 361 332 } 362 333 363 334 static void ps_control(struct hci_dev *hdev, u8 ps_state) ··· 579 550 nxpdev->fw_v1_sent_bytes = 0; 580 551 nxpdev->fw_v1_expected_len = HDR_LEN; 581 552 nxpdev->boot_reg_offset = 0; 553 + nxpdev->fw_dnld_v3_offset = 0; 582 554 nxpdev->fw_v3_offset_correction = 0; 583 555 nxpdev->baudrate_changed = false; 584 556 nxpdev->timeout_changed = false; ··· 594 564 !test_bit(BTNXPUART_FW_DOWNLOADING, 595 565 &nxpdev->tx_state), 
596 566 msecs_to_jiffies(60000)); 567 + 568 + release_firmware(nxpdev->fw); 569 + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); 570 + 597 571 if (err == 0) { 598 - bt_dev_err(hdev, "FW Download Timeout."); 572 + bt_dev_err(hdev, "FW Download Timeout. offset: %d", 573 + nxpdev->fw_dnld_v1_offset ? 574 + nxpdev->fw_dnld_v1_offset : 575 + nxpdev->fw_dnld_v3_offset); 599 576 return -ETIMEDOUT; 577 + } 578 + if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) { 579 + bt_dev_err(hdev, "FW Download Aborted"); 580 + return -EINTR; 600 581 } 601 582 602 583 serdev_device_set_flow_control(nxpdev->serdev, true); 603 - release_firmware(nxpdev->fw); 604 - memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); 605 584 606 585 /* Allow the downloaded FW to initialize */ 607 586 msleep(1200); ··· 721 682 return is_fw_downloading(nxpdev); 722 683 } 723 684 724 - static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name) 685 + static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name, 686 + const char *fw_name_old) 725 687 { 726 688 struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); 689 + const char *fw_name_dt; 727 690 int err = 0; 728 691 729 692 if (!fw_name) 730 693 return -ENOENT; 731 694 732 695 if (!strlen(nxpdev->fw_name)) { 733 - snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name); 696 + if (strcmp(fw_name, FIRMWARE_HELPER) && 697 + !device_property_read_string(&nxpdev->serdev->dev, 698 + "firmware-name", 699 + &fw_name_dt)) 700 + fw_name = fw_name_dt; 701 + snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name); 702 + err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); 703 + if (err < 0 && fw_name_old) { 704 + snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name_old); 705 + err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); 706 + } 734 707 735 - bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name); 736 - err = request_firmware(&nxpdev->fw, 
nxpdev->fw_name, &hdev->dev); 708 + bt_dev_info(hdev, "Request Firmware: %s", nxpdev->fw_name); 737 709 if (err < 0) { 738 710 bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name); 739 711 clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); ··· 823 773 } 824 774 825 775 if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) { 826 - if (nxp_request_firmware(hdev, nxp_data->fw_name)) 776 + if (nxp_request_firmware(hdev, nxp_data->fw_name, nxp_data->fw_name_old)) 827 777 goto free_skb; 828 778 } else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { 829 - if (nxp_request_firmware(hdev, nxp_data->helper_fw_name)) 779 + if (nxp_request_firmware(hdev, nxp_data->helper_fw_name, NULL)) 830 780 goto free_skb; 831 781 } 832 782 833 783 if (!len) { 834 - bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", 784 + bt_dev_info(hdev, "FW Download Complete: %zu bytes", 835 785 nxpdev->fw->size); 836 786 if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { 837 787 nxpdev->helper_downloaded = true; ··· 913 863 else 914 864 bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); 915 865 break; 916 - case CHIP_ID_AW693: 866 + case CHIP_ID_AW693a0: 917 867 if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) 918 868 fw_name = FIRMWARE_AW693; 919 869 else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) 920 870 fw_name = FIRMWARE_SECURE_AW693; 871 + else 872 + bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); 873 + break; 874 + case CHIP_ID_AW693a1: 875 + if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) 876 + fw_name = FIRMWARE_AW693_A1; 877 + else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) 878 + fw_name = FIRMWARE_SECURE_AW693_A1; 879 + else 880 + bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); 881 + break; 882 + case CHIP_ID_IW615a0: 883 + case CHIP_ID_IW615a1: 884 + if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) 885 + fw_name = FIRMWARE_IW615; 886 + else if ((loader_ver & FW_SECURE_MASK) != 
FW_AUTH_ILLEGAL) 887 + fw_name = FIRMWARE_SECURE_IW615; 921 888 else 922 889 bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); 923 890 break; ··· 945 878 return fw_name; 946 879 } 947 880 881 + static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, 882 + u8 loader_ver) 883 + { 884 + char *fw_name_old = NULL; 885 + 886 + switch (chipid) { 887 + case CHIP_ID_W9098: 888 + fw_name_old = FIRMWARE_W9098_OLD; 889 + break; 890 + } 891 + return fw_name_old; 892 + } 893 + 948 894 static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb) 949 895 { 950 896 struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req)); 951 897 struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); 898 + const char *fw_name; 899 + const char *fw_name_old; 952 900 u16 chip_id; 953 901 u8 loader_ver; 954 902 ··· 972 890 973 891 chip_id = le16_to_cpu(req->chip_id); 974 892 loader_ver = req->loader_ver; 975 - if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev, 976 - chip_id, loader_ver))) 893 + bt_dev_info(hdev, "ChipID: %04x, Version: %d", chip_id, loader_ver); 894 + fw_name = nxp_get_fw_name_from_chipid(hdev, chip_id, loader_ver); 895 + fw_name_old = nxp_get_old_fw_name_from_chipid(hdev, chip_id, loader_ver); 896 + if (!nxp_request_firmware(hdev, fw_name, fw_name_old)) 977 897 nxp_send_ack(NXP_ACK_V3, hdev); 978 898 979 899 free_skb: 980 900 kfree_skb(skb); 981 901 return 0; 902 + } 903 + 904 + static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req) 905 + { 906 + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); 907 + __u32 offset = __le32_to_cpu(req->offset); 908 + __u16 err = __le16_to_cpu(req->error); 909 + union nxp_v3_rx_timeout_nak_u nak_tx_buf; 910 + 911 + switch (err) { 912 + case NXP_ACK_RX_TIMEOUT: 913 + case NXP_HDR_RX_TIMEOUT: 914 + case NXP_DATA_RX_TIMEOUT: 915 + nak_tx_buf.pkt.nak = NXP_NAK_V3; 916 + nak_tx_buf.pkt.offset = __cpu_to_le32(offset); 917 + nak_tx_buf.pkt.crc = 
crc8(crc8_table, nak_tx_buf.buf, 918 + sizeof(nak_tx_buf) - 1, 0xff); 919 + serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf, 920 + sizeof(nak_tx_buf)); 921 + break; 922 + default: 923 + bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err); 924 + break; 925 + 926 + } 927 + 982 928 } 983 929 984 930 static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) ··· 1023 913 if (!req || !nxpdev->fw) 1024 914 goto free_skb; 1025 915 1026 - nxp_send_ack(NXP_ACK_V3, hdev); 916 + if (!req->error) { 917 + nxp_send_ack(NXP_ACK_V3, hdev); 918 + } else { 919 + nxp_handle_fw_download_error(hdev, req); 920 + goto free_skb; 921 + } 1027 922 1028 923 len = __le16_to_cpu(req->len); 1029 924 ··· 1049 934 } 1050 935 1051 936 if (req->len == 0) { 1052 - bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", 937 + bt_dev_info(hdev, "FW Download Complete: %zu bytes", 1053 938 nxpdev->fw->size); 1054 939 clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); 1055 940 wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); 1056 941 goto free_skb; 1057 942 } 1058 - if (req->error) 1059 - bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip", 1060 - req->error); 1061 943 1062 944 offset = __le32_to_cpu(req->offset); 1063 945 if (offset < nxpdev->fw_v3_offset_correction) { ··· 1066 954 goto free_skb; 1067 955 } 1068 956 1069 - serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset - 1070 - nxpdev->fw_v3_offset_correction, len); 957 + nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction; 958 + serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + 959 + nxpdev->fw_dnld_v3_offset, len); 1071 960 1072 961 free_skb: 1073 962 kfree_skb(skb); ··· 1150 1037 if (err < 0) 1151 1038 return err; 1152 1039 } else { 1153 - bt_dev_dbg(hdev, "FW already running."); 1040 + bt_dev_info(hdev, "FW already running."); 1154 1041 clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); 1155 1042 } 1156 1043 ··· 1366 1253 ps_wakeup(nxpdev); 1367 1254 
serdev_device_close(nxpdev->serdev); 1368 1255 skb_queue_purge(&nxpdev->txq); 1369 - kfree_skb(nxpdev->rx_skb); 1370 - nxpdev->rx_skb = NULL; 1256 + if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { 1257 + kfree_skb(nxpdev->rx_skb); 1258 + nxpdev->rx_skb = NULL; 1259 + } 1371 1260 clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); 1372 1261 return 0; 1373 1262 } ··· 1384 1269 1385 1270 cancel_work_sync(&nxpdev->tx_work); 1386 1271 1387 - kfree_skb(nxpdev->rx_skb); 1388 - nxpdev->rx_skb = NULL; 1272 + if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { 1273 + kfree_skb(nxpdev->rx_skb); 1274 + nxpdev->rx_skb = NULL; 1275 + } 1389 1276 1390 1277 return 0; 1391 1278 } ··· 1502 1385 struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); 1503 1386 struct hci_dev *hdev = nxpdev->hdev; 1504 1387 1505 - /* Restore FW baudrate to fw_init_baudrate if changed. 1506 - * This will ensure FW baudrate is in sync with 1507 - * driver baudrate in case this driver is re-inserted. 1508 - */ 1509 - if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { 1510 - nxpdev->new_baudrate = nxpdev->fw_init_baudrate; 1511 - nxp_set_baudrate_cmd(hdev, NULL); 1388 + if (is_fw_downloading(nxpdev)) { 1389 + set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state); 1390 + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); 1391 + wake_up_interruptible(&nxpdev->check_boot_sign_wait_q); 1392 + wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); 1393 + } else { 1394 + /* Restore FW baudrate to fw_init_baudrate if changed. 1395 + * This will ensure FW baudrate is in sync with 1396 + * driver baudrate in case this driver is re-inserted. 
1397 + */ 1398 + if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { 1399 + nxpdev->new_baudrate = nxpdev->fw_init_baudrate; 1400 + nxp_set_baudrate_cmd(hdev, NULL); 1401 + } 1402 + ps_cancel_timer(nxpdev); 1512 1403 } 1513 - 1514 - ps_cancel_timer(nxpdev); 1515 1404 hci_unregister_dev(hdev); 1516 1405 hci_free_dev(hdev); 1517 1406 } 1518 1407 1408 + #ifdef CONFIG_PM_SLEEP 1409 + static int nxp_serdev_suspend(struct device *dev) 1410 + { 1411 + struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev); 1412 + struct ps_data *psdata = &nxpdev->psdata; 1413 + 1414 + ps_control(psdata->hdev, PS_STATE_SLEEP); 1415 + return 0; 1416 + } 1417 + 1418 + static int nxp_serdev_resume(struct device *dev) 1419 + { 1420 + struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev); 1421 + struct ps_data *psdata = &nxpdev->psdata; 1422 + 1423 + ps_control(psdata->hdev, PS_STATE_AWAKE); 1424 + return 0; 1425 + } 1426 + #endif 1427 + 1519 1428 static struct btnxpuart_data w8987_data __maybe_unused = { 1520 1429 .helper_fw_name = NULL, 1521 1430 .fw_name = FIRMWARE_W8987, 1431 + .fw_name_old = FIRMWARE_W8987_OLD, 1522 1432 }; 1523 1433 1524 1434 static struct btnxpuart_data w8997_data __maybe_unused = { 1525 1435 .helper_fw_name = FIRMWARE_HELPER, 1526 1436 .fw_name = FIRMWARE_W8997, 1437 + .fw_name_old = FIRMWARE_W8997_OLD, 1527 1438 }; 1528 1439 1529 1440 static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { ··· 1561 1416 }; 1562 1417 MODULE_DEVICE_TABLE(of, nxpuart_of_match_table); 1563 1418 1419 + static const struct dev_pm_ops nxp_pm_ops = { 1420 + SET_SYSTEM_SLEEP_PM_OPS(nxp_serdev_suspend, nxp_serdev_resume) 1421 + }; 1422 + 1564 1423 static struct serdev_device_driver nxp_serdev_driver = { 1565 1424 .probe = nxp_serdev_probe, 1566 1425 .remove = nxp_serdev_remove, 1567 1426 .driver = { 1568 1427 .name = "btnxpuart", 1569 1428 .of_match_table = of_match_ptr(nxpuart_of_match_table), 1429 + .pm = &nxp_pm_ops, 1570 1430 }, 1571 1431 }; 1572 1432
+1 -1
drivers/bluetooth/btrtl.c
··· 811 811 struct sk_buff *skb; 812 812 struct hci_rp_read_local_version *rp; 813 813 814 - dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); 814 + dl_cmd = kmalloc(sizeof(*dl_cmd), GFP_KERNEL); 815 815 if (!dl_cmd) 816 816 return -ENOMEM; 817 817
+80 -659
drivers/bluetooth/btusb.c
··· 479 479 { USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED }, 480 480 { USB_DEVICE(0x8087, 0x0037), .driver_info = BTUSB_INTEL_COMBINED }, 481 481 { USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED }, 482 + { USB_DEVICE(0x8087, 0x0039), .driver_info = BTUSB_INTEL_COMBINED }, 482 483 { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, 483 484 { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | 484 485 BTUSB_INTEL_NO_WBS_SUPPORT | ··· 555 554 { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK | 556 555 BTUSB_WIDEBAND_SPEECH }, 557 556 { USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK | 557 + BTUSB_WIDEBAND_SPEECH }, 558 + { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK | 559 + BTUSB_WIDEBAND_SPEECH }, 560 + { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK | 558 561 BTUSB_WIDEBAND_SPEECH }, 559 562 560 563 /* Realtek 8852BT/8852BE-VT Bluetooth devices */ ··· 895 890 int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); 896 891 897 892 int (*setup_on_usb)(struct hci_dev *hdev); 893 + 894 + int (*suspend)(struct hci_dev *hdev); 895 + int (*resume)(struct hci_dev *hdev); 898 896 899 897 int oob_wake_irq; /* irq for out-of-band wake-on-bt */ 900 898 unsigned cmd_timeout_cnt; ··· 2646 2638 return hci_recv_frame(hdev, skb); 2647 2639 } 2648 2640 2649 - /* UHW CR mapping */ 2650 - #define MTK_BT_MISC 0x70002510 2651 - #define MTK_BT_SUBSYS_RST 0x70002610 2652 - #define MTK_UDMA_INT_STA_BT 0x74000024 2653 - #define MTK_UDMA_INT_STA_BT1 0x74000308 2654 - #define MTK_BT_WDT_STATUS 0x740003A0 2655 - #define MTK_EP_RST_OPT 0x74011890 2656 - #define MTK_EP_RST_IN_OUT_OPT 0x00010001 2657 - #define MTK_BT_RST_DONE 0x00000100 2658 - #define MTK_BT_RESET_REG_CONNV3 0x70028610 2659 - #define MTK_BT_READ_DEV_ID 0x70010200 2660 - 2661 - 2662 - static void btusb_mtk_wmt_recv(struct urb *urb) 2641 + static void btusb_mtk_claim_iso_intf(struct btusb_data *data) 2663 2642 { 2664 - 
struct hci_dev *hdev = urb->context; 2665 - struct btusb_data *data = hci_get_drvdata(hdev); 2666 - struct sk_buff *skb; 2643 + struct btmtk_data *btmtk_data = hci_get_priv(data->hdev); 2667 2644 int err; 2668 2645 2669 - if (urb->status == 0 && urb->actual_length > 0) { 2670 - hdev->stat.byte_rx += urb->actual_length; 2671 - 2672 - /* WMT event shouldn't be fragmented and the size should be 2673 - * less than HCI_WMT_MAX_EVENT_SIZE. 2674 - */ 2675 - skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); 2676 - if (!skb) { 2677 - hdev->stat.err_rx++; 2678 - kfree(urb->setup_packet); 2679 - return; 2680 - } 2681 - 2682 - hci_skb_pkt_type(skb) = HCI_EVENT_PKT; 2683 - skb_put_data(skb, urb->transfer_buffer, urb->actual_length); 2684 - 2685 - /* When someone waits for the WMT event, the skb is being cloned 2686 - * and being processed the events from there then. 2687 - */ 2688 - if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) { 2689 - data->evt_skb = skb_clone(skb, GFP_ATOMIC); 2690 - if (!data->evt_skb) { 2691 - kfree_skb(skb); 2692 - kfree(urb->setup_packet); 2693 - return; 2694 - } 2695 - } 2696 - 2697 - err = hci_recv_frame(hdev, skb); 2698 - if (err < 0) { 2699 - kfree_skb(data->evt_skb); 2700 - data->evt_skb = NULL; 2701 - kfree(urb->setup_packet); 2702 - return; 2703 - } 2704 - 2705 - if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT, 2706 - &data->flags)) { 2707 - /* Barrier to sync with other CPUs */ 2708 - smp_mb__after_atomic(); 2709 - wake_up_bit(&data->flags, 2710 - BTUSB_TX_WAIT_VND_EVT); 2711 - } 2712 - kfree(urb->setup_packet); 2713 - return; 2714 - } else if (urb->status == -ENOENT) { 2715 - /* Avoid suspend failed when usb_kill_urb */ 2646 + err = usb_driver_claim_interface(&btusb_driver, 2647 + btmtk_data->isopkt_intf, data); 2648 + if (err < 0) { 2649 + btmtk_data->isopkt_intf = NULL; 2650 + bt_dev_err(data->hdev, "Failed to claim iso interface"); 2716 2651 return; 2717 2652 } 2718 2653 2719 - usb_mark_last_busy(data->udev); 2720 - 2721 - /* The URB 
complete handler is still called with urb->actual_length = 0 2722 - * when the event is not available, so we should keep re-submitting 2723 - * URB until WMT event returns, Also, It's necessary to wait some time 2724 - * between the two consecutive control URBs to relax the target device 2725 - * to generate the event. Otherwise, the WMT event cannot return from 2726 - * the device successfully. 2727 - */ 2728 - udelay(500); 2729 - 2730 - usb_anchor_urb(urb, &data->ctrl_anchor); 2731 - err = usb_submit_urb(urb, GFP_ATOMIC); 2732 - if (err < 0) { 2733 - kfree(urb->setup_packet); 2734 - /* -EPERM: urb is being killed; 2735 - * -ENODEV: device got disconnected 2736 - */ 2737 - if (err != -EPERM && err != -ENODEV) 2738 - bt_dev_err(hdev, "urb %p failed to resubmit (%d)", 2739 - urb, -err); 2740 - usb_unanchor_urb(urb); 2741 - } 2654 + set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags); 2742 2655 } 2743 2656 2744 - static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev) 2657 + static void btusb_mtk_release_iso_intf(struct btusb_data *data) 2745 2658 { 2746 - struct btusb_data *data = hci_get_drvdata(hdev); 2747 - struct usb_ctrlrequest *dr; 2748 - unsigned char *buf; 2749 - int err, size = 64; 2750 - unsigned int pipe; 2751 - struct urb *urb; 2659 + struct btmtk_data *btmtk_data = hci_get_priv(data->hdev); 2752 2660 2753 - urb = usb_alloc_urb(0, GFP_KERNEL); 2754 - if (!urb) 2755 - return -ENOMEM; 2661 + if (btmtk_data->isopkt_intf) { 2662 + usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); 2663 + clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags); 2756 2664 2757 - dr = kmalloc(sizeof(*dr), GFP_KERNEL); 2758 - if (!dr) { 2759 - usb_free_urb(urb); 2760 - return -ENOMEM; 2665 + dev_kfree_skb_irq(btmtk_data->isopkt_skb); 2666 + btmtk_data->isopkt_skb = NULL; 2667 + usb_set_intfdata(btmtk_data->isopkt_intf, NULL); 2668 + usb_driver_release_interface(&btusb_driver, 2669 + btmtk_data->isopkt_intf); 2761 2670 } 2762 2671 2763 - dr->bRequestType = USB_TYPE_VENDOR | 
USB_DIR_IN; 2764 - dr->bRequest = 1; 2765 - dr->wIndex = cpu_to_le16(0); 2766 - dr->wValue = cpu_to_le16(48); 2767 - dr->wLength = cpu_to_le16(size); 2768 - 2769 - buf = kmalloc(size, GFP_KERNEL); 2770 - if (!buf) { 2771 - kfree(dr); 2772 - usb_free_urb(urb); 2773 - return -ENOMEM; 2774 - } 2775 - 2776 - pipe = usb_rcvctrlpipe(data->udev, 0); 2777 - 2778 - usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, 2779 - buf, size, btusb_mtk_wmt_recv, hdev); 2780 - 2781 - urb->transfer_flags |= URB_FREE_BUFFER; 2782 - 2783 - usb_anchor_urb(urb, &data->ctrl_anchor); 2784 - err = usb_submit_urb(urb, GFP_KERNEL); 2785 - if (err < 0) { 2786 - if (err != -EPERM && err != -ENODEV) 2787 - bt_dev_err(hdev, "urb %p submission failed (%d)", 2788 - urb, -err); 2789 - usb_unanchor_urb(urb); 2790 - } 2791 - 2792 - usb_free_urb(urb); 2793 - 2794 - return err; 2795 - } 2796 - 2797 - static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, 2798 - struct btmtk_hci_wmt_params *wmt_params) 2799 - { 2800 - struct btusb_data *data = hci_get_drvdata(hdev); 2801 - struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; 2802 - u32 hlen, status = BTMTK_WMT_INVALID; 2803 - struct btmtk_hci_wmt_evt *wmt_evt; 2804 - struct btmtk_hci_wmt_cmd *wc; 2805 - struct btmtk_wmt_hdr *hdr; 2806 - int err; 2807 - 2808 - /* Send the WMT command and wait until the WMT event returns */ 2809 - hlen = sizeof(*hdr) + wmt_params->dlen; 2810 - if (hlen > 255) 2811 - return -EINVAL; 2812 - 2813 - wc = kzalloc(hlen, GFP_KERNEL); 2814 - if (!wc) 2815 - return -ENOMEM; 2816 - 2817 - hdr = &wc->hdr; 2818 - hdr->dir = 1; 2819 - hdr->op = wmt_params->op; 2820 - hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); 2821 - hdr->flag = wmt_params->flag; 2822 - memcpy(wc->data, wmt_params->data, wmt_params->dlen); 2823 - 2824 - set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 2825 - 2826 - /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling, 2827 - * it needs constantly polling control pipe until the host received the 2828 - * 
WMT event, thus, we should require to specifically acquire PM counter 2829 - * on the USB to prevent the interface from entering auto suspended 2830 - * while WMT cmd/event in progress. 2831 - */ 2832 - err = usb_autopm_get_interface(data->intf); 2833 - if (err < 0) 2834 - goto err_free_wc; 2835 - 2836 - err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); 2837 - 2838 - if (err < 0) { 2839 - clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 2840 - usb_autopm_put_interface(data->intf); 2841 - goto err_free_wc; 2842 - } 2843 - 2844 - /* Submit control IN URB on demand to process the WMT event */ 2845 - err = btusb_mtk_submit_wmt_recv_urb(hdev); 2846 - 2847 - usb_autopm_put_interface(data->intf); 2848 - 2849 - if (err < 0) 2850 - goto err_free_wc; 2851 - 2852 - /* The vendor specific WMT commands are all answered by a vendor 2853 - * specific event and will have the Command Status or Command 2854 - * Complete as with usual HCI command flow control. 2855 - * 2856 - * After sending the command, wait for BTUSB_TX_WAIT_VND_EVT 2857 - * state to be cleared. The driver specific event receive routine 2858 - * will clear that state and with that indicate completion of the 2859 - * WMT command. 
2860 - */ 2861 - err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT, 2862 - TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); 2863 - if (err == -EINTR) { 2864 - bt_dev_err(hdev, "Execution of wmt command interrupted"); 2865 - clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 2866 - goto err_free_wc; 2867 - } 2868 - 2869 - if (err) { 2870 - bt_dev_err(hdev, "Execution of wmt command timed out"); 2871 - clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); 2872 - err = -ETIMEDOUT; 2873 - goto err_free_wc; 2874 - } 2875 - 2876 - if (data->evt_skb == NULL) 2877 - goto err_free_wc; 2878 - 2879 - /* Parse and handle the return WMT event */ 2880 - wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data; 2881 - if (wmt_evt->whdr.op != hdr->op) { 2882 - bt_dev_err(hdev, "Wrong op received %d expected %d", 2883 - wmt_evt->whdr.op, hdr->op); 2884 - err = -EIO; 2885 - goto err_free_skb; 2886 - } 2887 - 2888 - switch (wmt_evt->whdr.op) { 2889 - case BTMTK_WMT_SEMAPHORE: 2890 - if (wmt_evt->whdr.flag == 2) 2891 - status = BTMTK_WMT_PATCH_UNDONE; 2892 - else 2893 - status = BTMTK_WMT_PATCH_DONE; 2894 - break; 2895 - case BTMTK_WMT_FUNC_CTRL: 2896 - wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; 2897 - if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) 2898 - status = BTMTK_WMT_ON_DONE; 2899 - else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) 2900 - status = BTMTK_WMT_ON_PROGRESS; 2901 - else 2902 - status = BTMTK_WMT_ON_UNDONE; 2903 - break; 2904 - case BTMTK_WMT_PATCH_DWNLD: 2905 - if (wmt_evt->whdr.flag == 2) 2906 - status = BTMTK_WMT_PATCH_DONE; 2907 - else if (wmt_evt->whdr.flag == 1) 2908 - status = BTMTK_WMT_PATCH_PROGRESS; 2909 - else 2910 - status = BTMTK_WMT_PATCH_UNDONE; 2911 - break; 2912 - } 2913 - 2914 - if (wmt_params->status) 2915 - *wmt_params->status = status; 2916 - 2917 - err_free_skb: 2918 - kfree_skb(data->evt_skb); 2919 - data->evt_skb = NULL; 2920 - err_free_wc: 2921 - kfree(wc); 2922 - return err; 2923 - } 2924 - 2925 - static int 
btusb_mtk_func_query(struct hci_dev *hdev) 2926 - { 2927 - struct btmtk_hci_wmt_params wmt_params; 2928 - int status, err; 2929 - u8 param = 0; 2930 - 2931 - /* Query whether the function is enabled */ 2932 - wmt_params.op = BTMTK_WMT_FUNC_CTRL; 2933 - wmt_params.flag = 4; 2934 - wmt_params.dlen = sizeof(param); 2935 - wmt_params.data = &param; 2936 - wmt_params.status = &status; 2937 - 2938 - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 2939 - if (err < 0) { 2940 - bt_dev_err(hdev, "Failed to query function status (%d)", err); 2941 - return err; 2942 - } 2943 - 2944 - return status; 2945 - } 2946 - 2947 - static int btusb_mtk_uhw_reg_write(struct btusb_data *data, u32 reg, u32 val) 2948 - { 2949 - struct hci_dev *hdev = data->hdev; 2950 - int pipe, err; 2951 - void *buf; 2952 - 2953 - buf = kzalloc(4, GFP_KERNEL); 2954 - if (!buf) 2955 - return -ENOMEM; 2956 - 2957 - put_unaligned_le32(val, buf); 2958 - 2959 - pipe = usb_sndctrlpipe(data->udev, 0); 2960 - err = usb_control_msg(data->udev, pipe, 0x02, 2961 - 0x5E, 2962 - reg >> 16, reg & 0xffff, 2963 - buf, 4, USB_CTRL_SET_TIMEOUT); 2964 - if (err < 0) { 2965 - bt_dev_err(hdev, "Failed to write uhw reg(%d)", err); 2966 - goto err_free_buf; 2967 - } 2968 - 2969 - err_free_buf: 2970 - kfree(buf); 2971 - 2972 - return err; 2973 - } 2974 - 2975 - static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val) 2976 - { 2977 - struct hci_dev *hdev = data->hdev; 2978 - int pipe, err; 2979 - void *buf; 2980 - 2981 - buf = kzalloc(4, GFP_KERNEL); 2982 - if (!buf) 2983 - return -ENOMEM; 2984 - 2985 - pipe = usb_rcvctrlpipe(data->udev, 0); 2986 - err = usb_control_msg(data->udev, pipe, 0x01, 2987 - 0xDE, 2988 - reg >> 16, reg & 0xffff, 2989 - buf, 4, USB_CTRL_GET_TIMEOUT); 2990 - if (err < 0) { 2991 - bt_dev_err(hdev, "Failed to read uhw reg(%d)", err); 2992 - goto err_free_buf; 2993 - } 2994 - 2995 - *val = get_unaligned_le32(buf); 2996 - bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val); 2997 - 2998 - 
err_free_buf: 2999 - kfree(buf); 3000 - 3001 - return err; 3002 - } 3003 - 3004 - static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val) 3005 - { 3006 - int pipe, err, size = sizeof(u32); 3007 - void *buf; 3008 - 3009 - buf = kzalloc(size, GFP_KERNEL); 3010 - if (!buf) 3011 - return -ENOMEM; 3012 - 3013 - pipe = usb_rcvctrlpipe(data->udev, 0); 3014 - err = usb_control_msg(data->udev, pipe, 0x63, 3015 - USB_TYPE_VENDOR | USB_DIR_IN, 3016 - reg >> 16, reg & 0xffff, 3017 - buf, size, USB_CTRL_GET_TIMEOUT); 3018 - if (err < 0) 3019 - goto err_free_buf; 3020 - 3021 - *val = get_unaligned_le32(buf); 3022 - 3023 - err_free_buf: 3024 - kfree(buf); 3025 - 3026 - return err; 3027 - } 3028 - 3029 - static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id) 3030 - { 3031 - return btusb_mtk_reg_read(data, reg, id); 3032 - } 3033 - 3034 - static u32 btusb_mtk_reset_done(struct hci_dev *hdev) 3035 - { 3036 - struct btusb_data *data = hci_get_drvdata(hdev); 3037 - u32 val = 0; 3038 - 3039 - btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val); 3040 - 3041 - return val & MTK_BT_RST_DONE; 2672 + clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags); 3042 2673 } 3043 2674 3044 2675 static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data) 3045 2676 { 3046 2677 struct btusb_data *data = hci_get_drvdata(hdev); 3047 - struct btmediatek_data *mediatek; 3048 - u32 val; 2678 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 3049 2679 int err; 3050 2680 3051 2681 /* It's MediaTek specific bluetooth reset mechanism via USB */ 3052 - if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { 2682 + if (test_and_set_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags)) { 3053 2683 bt_dev_err(hdev, "last reset failed? 
Not resetting again"); 3054 2684 return -EBUSY; 3055 2685 } ··· 2696 3050 if (err < 0) 2697 3051 return err; 2698 3052 3053 + if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) 3054 + btusb_mtk_release_iso_intf(data); 3055 + 2699 3056 btusb_stop_traffic(data); 2700 3057 usb_kill_anchored_urbs(&data->tx_anchor); 2701 - mediatek = hci_get_priv(hdev); 2702 3058 2703 - if (mediatek->dev_id == 0x7925) { 2704 - btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); 2705 - val |= (1 << 5); 2706 - btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); 2707 - btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); 2708 - val &= 0xFFFF00FF; 2709 - val |= (1 << 13); 2710 - btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); 2711 - btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001); 2712 - btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); 2713 - val |= (1 << 0); 2714 - btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); 2715 - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF); 2716 - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val); 2717 - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF); 2718 - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val); 2719 - msleep(100); 2720 - } else { 2721 - /* It's Device EndPoint Reset Option Register */ 2722 - bt_dev_dbg(hdev, "Initiating reset mechanism via uhw"); 2723 - btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); 2724 - btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val); 2725 - 2726 - /* Reset the bluetooth chip via USB interface. 
*/ 2727 - btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1); 2728 - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF); 2729 - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val); 2730 - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF); 2731 - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val); 2732 - /* MT7921 need to delay 20ms between toggle reset bit */ 2733 - msleep(20); 2734 - btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0); 2735 - btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val); 2736 - } 2737 - 2738 - err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val, 2739 - val & MTK_BT_RST_DONE, 20000, 1000000); 2740 - if (err < 0) 2741 - bt_dev_err(hdev, "Reset timeout"); 2742 - 2743 - btusb_mtk_id_get(data, 0x70010200, &val); 2744 - if (!val) 2745 - bt_dev_err(hdev, "Can't get device id, subsys reset fail."); 3059 + err = btmtk_usb_subsys_reset(hdev, btmtk_data->dev_id); 2746 3060 2747 3061 usb_queue_reset_device(data->intf); 2748 - 2749 - clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags); 3062 + clear_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags); 2750 3063 2751 3064 return err; 3065 + } 3066 + 3067 + static int btusb_send_frame_mtk(struct hci_dev *hdev, struct sk_buff *skb) 3068 + { 3069 + struct urb *urb; 3070 + 3071 + BT_DBG("%s", hdev->name); 3072 + 3073 + if (hci_skb_pkt_type(skb) == HCI_ISODATA_PKT) { 3074 + urb = alloc_mtk_intr_urb(hdev, skb, btusb_tx_complete); 3075 + if (IS_ERR(urb)) 3076 + return PTR_ERR(urb); 3077 + 3078 + return submit_or_queue_tx_urb(hdev, urb); 3079 + } else { 3080 + return btusb_send_frame(hdev, skb); 3081 + } 2752 3082 } 2753 3083 2754 3084 static int btusb_mtk_setup(struct hci_dev *hdev) 2755 3085 { 2756 3086 struct btusb_data *data = hci_get_drvdata(hdev); 2757 - struct btmtk_hci_wmt_params wmt_params; 2758 - ktime_t calltime, delta, rettime; 2759 - struct btmtk_tci_sleep tci_sleep; 2760 - unsigned long long duration; 2761 - struct sk_buff *skb; 2762 - const char *fwname; 2763 - int 
err, status; 2764 - u32 dev_id = 0; 2765 - char fw_bin_name[64]; 2766 - u32 fw_version = 0, fw_flavor = 0; 2767 - u8 param; 2768 - struct btmediatek_data *mediatek; 3087 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 2769 3088 2770 - calltime = ktime_get(); 3089 + /* MediaTek WMT vendor cmd requiring below USB resources to 3090 + * complete the handshake. 3091 + */ 3092 + btmtk_data->drv_name = btusb_driver.name; 3093 + btmtk_data->intf = data->intf; 3094 + btmtk_data->udev = data->udev; 3095 + btmtk_data->ctrl_anchor = &data->ctrl_anchor; 3096 + btmtk_data->reset_sync = btusb_mtk_reset; 2771 3097 2772 - err = btusb_mtk_id_get(data, 0x80000008, &dev_id); 2773 - if (err < 0) { 2774 - bt_dev_err(hdev, "Failed to get device id (%d)", err); 2775 - return err; 2776 - } 3098 + /* Claim ISO data interface and endpoint */ 3099 + btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM); 3100 + if (btmtk_data->isopkt_intf) 3101 + btusb_mtk_claim_iso_intf(data); 2777 3102 2778 - if (!dev_id || dev_id != 0x7663) { 2779 - err = btusb_mtk_id_get(data, 0x70010200, &dev_id); 2780 - if (err < 0) { 2781 - bt_dev_err(hdev, "Failed to get device id (%d)", err); 2782 - return err; 2783 - } 2784 - err = btusb_mtk_id_get(data, 0x80021004, &fw_version); 2785 - if (err < 0) { 2786 - bt_dev_err(hdev, "Failed to get fw version (%d)", err); 2787 - return err; 2788 - } 2789 - err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor); 2790 - if (err < 0) { 2791 - bt_dev_err(hdev, "Failed to get fw flavor (%d)", err); 2792 - return err; 2793 - } 2794 - fw_flavor = (fw_flavor & 0x00000080) >> 7; 2795 - } 2796 - 2797 - mediatek = hci_get_priv(hdev); 2798 - mediatek->dev_id = dev_id; 2799 - mediatek->reset_sync = btusb_mtk_reset; 2800 - 2801 - err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version); 2802 - if (err < 0) 2803 - bt_dev_err(hdev, "Failed to register coredump (%d)", err); 2804 - 2805 - switch (dev_id) { 2806 - case 0x7663: 2807 - fwname = FIRMWARE_MT7663; 2808 - 
break; 2809 - case 0x7668: 2810 - fwname = FIRMWARE_MT7668; 2811 - break; 2812 - case 0x7922: 2813 - case 0x7961: 2814 - case 0x7925: 2815 - if (dev_id == 0x7925) 2816 - snprintf(fw_bin_name, sizeof(fw_bin_name), 2817 - "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", 2818 - dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1); 2819 - else if (dev_id == 0x7961 && fw_flavor) 2820 - snprintf(fw_bin_name, sizeof(fw_bin_name), 2821 - "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin", 2822 - dev_id & 0xffff, (fw_version & 0xff) + 1); 2823 - else 2824 - snprintf(fw_bin_name, sizeof(fw_bin_name), 2825 - "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", 2826 - dev_id & 0xffff, (fw_version & 0xff) + 1); 2827 - 2828 - err = btmtk_setup_firmware_79xx(hdev, fw_bin_name, 2829 - btusb_mtk_hci_wmt_sync); 2830 - if (err < 0) { 2831 - bt_dev_err(hdev, "Failed to set up firmware (%d)", err); 2832 - return err; 2833 - } 2834 - 2835 - /* It's Device EndPoint Reset Option Register */ 2836 - btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); 2837 - 2838 - /* Enable Bluetooth protocol */ 2839 - param = 1; 2840 - wmt_params.op = BTMTK_WMT_FUNC_CTRL; 2841 - wmt_params.flag = 0; 2842 - wmt_params.dlen = sizeof(param); 2843 - wmt_params.data = &param; 2844 - wmt_params.status = NULL; 2845 - 2846 - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 2847 - if (err < 0) { 2848 - bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); 2849 - return err; 2850 - } 2851 - 2852 - hci_set_msft_opcode(hdev, 0xFD30); 2853 - hci_set_aosp_capable(hdev); 2854 - goto done; 2855 - default: 2856 - bt_dev_err(hdev, "Unsupported hardware variant (%08x)", 2857 - dev_id); 2858 - return -ENODEV; 2859 - } 2860 - 2861 - /* Query whether the firmware is already download */ 2862 - wmt_params.op = BTMTK_WMT_SEMAPHORE; 2863 - wmt_params.flag = 1; 2864 - wmt_params.dlen = 0; 2865 - wmt_params.data = NULL; 2866 - wmt_params.status = &status; 2867 - 2868 - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 
2869 - if (err < 0) { 2870 - bt_dev_err(hdev, "Failed to query firmware status (%d)", err); 2871 - return err; 2872 - } 2873 - 2874 - if (status == BTMTK_WMT_PATCH_DONE) { 2875 - bt_dev_info(hdev, "firmware already downloaded"); 2876 - goto ignore_setup_fw; 2877 - } 2878 - 2879 - /* Setup a firmware which the device definitely requires */ 2880 - err = btmtk_setup_firmware(hdev, fwname, 2881 - btusb_mtk_hci_wmt_sync); 2882 - if (err < 0) 2883 - return err; 2884 - 2885 - ignore_setup_fw: 2886 - err = readx_poll_timeout(btusb_mtk_func_query, hdev, status, 2887 - status < 0 || status != BTMTK_WMT_ON_PROGRESS, 2888 - 2000, 5000000); 2889 - /* -ETIMEDOUT happens */ 2890 - if (err < 0) 2891 - return err; 2892 - 2893 - /* The other errors happen in btusb_mtk_func_query */ 2894 - if (status < 0) 2895 - return status; 2896 - 2897 - if (status == BTMTK_WMT_ON_DONE) { 2898 - bt_dev_info(hdev, "function already on"); 2899 - goto ignore_func_on; 2900 - } 2901 - 2902 - /* Enable Bluetooth protocol */ 2903 - param = 1; 2904 - wmt_params.op = BTMTK_WMT_FUNC_CTRL; 2905 - wmt_params.flag = 0; 2906 - wmt_params.dlen = sizeof(param); 2907 - wmt_params.data = &param; 2908 - wmt_params.status = NULL; 2909 - 2910 - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 2911 - if (err < 0) { 2912 - bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); 2913 - return err; 2914 - } 2915 - 2916 - ignore_func_on: 2917 - /* Apply the low power environment setup */ 2918 - tci_sleep.mode = 0x5; 2919 - tci_sleep.duration = cpu_to_le16(0x640); 2920 - tci_sleep.host_duration = cpu_to_le16(0x640); 2921 - tci_sleep.host_wakeup_pin = 0; 2922 - tci_sleep.time_compensation = 0; 2923 - 2924 - skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, 2925 - HCI_INIT_TIMEOUT); 2926 - if (IS_ERR(skb)) { 2927 - err = PTR_ERR(skb); 2928 - bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); 2929 - return err; 2930 - } 2931 - kfree_skb(skb); 2932 - 2933 - done: 2934 - rettime = ktime_get(); 
2935 - delta = ktime_sub(rettime, calltime); 2936 - duration = (unsigned long long)ktime_to_ns(delta) >> 10; 2937 - 2938 - bt_dev_info(hdev, "Device setup in %llu usecs", duration); 2939 - 2940 - return 0; 3103 + return btmtk_usb_setup(hdev); 2941 3104 } 2942 3105 2943 3106 static int btusb_mtk_shutdown(struct hci_dev *hdev) 2944 3107 { 2945 - struct btmtk_hci_wmt_params wmt_params; 2946 - u8 param = 0; 2947 - int err; 2948 - 2949 - /* Disable the device */ 2950 - wmt_params.op = BTMTK_WMT_FUNC_CTRL; 2951 - wmt_params.flag = 0; 2952 - wmt_params.dlen = sizeof(param); 2953 - wmt_params.data = &param; 2954 - wmt_params.status = NULL; 2955 - 2956 - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); 2957 - if (err < 0) { 2958 - bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); 2959 - return err; 2960 - } 2961 - 2962 - return 0; 2963 - } 2964 - 2965 - static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb) 2966 - { 2967 3108 struct btusb_data *data = hci_get_drvdata(hdev); 2968 - u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); 3109 + struct btmtk_data *btmtk_data = hci_get_priv(hdev); 2969 3110 2970 - switch (handle) { 2971 - case 0xfc6f: /* Firmware dump from device */ 2972 - /* When the firmware hangs, the device can no longer 2973 - * suspend and thus disable auto-suspend. 2974 - */ 2975 - usb_disable_autosuspend(data->udev); 3111 + if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) 3112 + btusb_mtk_release_iso_intf(data); 2976 3113 2977 - /* We need to forward the diagnostic packet to userspace daemon 2978 - * for backward compatibility, so we have to clone the packet 2979 - * extraly for the in-kernel coredump support. 
2980 - */ 2981 - if (IS_ENABLED(CONFIG_DEV_COREDUMP)) { 2982 - struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC); 2983 - 2984 - if (skb_cd) 2985 - btmtk_process_coredump(hdev, skb_cd); 2986 - } 2987 - 2988 - fallthrough; 2989 - case 0x05ff: /* Firmware debug logging 1 */ 2990 - case 0x05fe: /* Firmware debug logging 2 */ 2991 - return hci_recv_diag(hdev, skb); 2992 - } 2993 - 2994 - return hci_recv_frame(hdev, skb); 3114 + return btmtk_usb_shutdown(hdev); 2995 3115 } 2996 3116 2997 3117 #ifdef CONFIG_PM ··· 3759 4347 data->recv_event = btusb_recv_event_realtek; 3760 4348 } else if (id->driver_info & BTUSB_MEDIATEK) { 3761 4349 /* Allocate extra space for Mediatek device */ 3762 - priv_size += sizeof(struct btmediatek_data); 4350 + priv_size += sizeof(struct btmtk_data); 3763 4351 } 3764 4352 3765 4353 data->recv_acl = hci_recv_frame; ··· 3863 4451 hdev->manufacturer = 70; 3864 4452 hdev->cmd_timeout = btmtk_reset_sync; 3865 4453 hdev->set_bdaddr = btmtk_set_bdaddr; 4454 + hdev->send = btusb_send_frame_mtk; 3866 4455 set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); 3867 4456 set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); 3868 - data->recv_acl = btusb_recv_acl_mtk; 4457 + data->recv_acl = btmtk_usb_recv_acl; 4458 + data->suspend = btmtk_usb_suspend; 4459 + data->resume = btmtk_usb_resume; 3869 4460 } 3870 4461 3871 4462 if (id->driver_info & BTUSB_SWAVE) { ··· 4109 4694 4110 4695 cancel_work_sync(&data->work); 4111 4696 4697 + if (data->suspend) 4698 + data->suspend(data->hdev); 4699 + 4112 4700 btusb_stop_traffic(data); 4113 4701 usb_kill_anchored_urbs(&data->tx_anchor); 4114 4702 ··· 4214 4796 else 4215 4797 btusb_submit_isoc_urb(hdev, GFP_NOIO); 4216 4798 } 4799 + 4800 + if (data->resume) 4801 + data->resume(hdev); 4217 4802 4218 4803 spin_lock_irq(&data->txlock); 4219 4804 play_deferred(data);
+50 -16
drivers/bluetooth/hci_bcm4377.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only OR MIT 2 2 /* 3 - * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe 3 + * Bluetooth HCI driver for Broadcom 4377/4378/4387/4388 devices attached via PCIe 4 4 * 5 5 * Copyright (C) The Asahi Linux Contributors 6 6 */ ··· 26 26 BCM4377 = 0, 27 27 BCM4378, 28 28 BCM4387, 29 + BCM4388, 29 30 }; 30 31 31 32 #define BCM4377_DEVICE_ID 0x5fa0 32 33 #define BCM4378_DEVICE_ID 0x5f69 33 34 #define BCM4387_DEVICE_ID 0x5f71 35 + #define BCM4388_DEVICE_ID 0x5f72 34 36 35 - #define BCM4377_TIMEOUT 1000 37 + #define BCM4377_TIMEOUT msecs_to_jiffies(1000) 38 + #define BCM4377_BOOT_TIMEOUT msecs_to_jiffies(5000) 36 39 37 40 /* 38 41 * These devices only support DMA transactions inside a 32bit window ··· 490 487 * second window in BAR0 491 488 * has_bar0_core2_window2: Set to true if this chip requires the second core's 492 489 * second window to be configured 490 + * bar2_offset: Offset to the start of the variables in BAR2 493 491 * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the 494 492 * vendor-specific subsystem control 495 493 * register has to be cleared ··· 514 510 u32 bar0_window1; 515 511 u32 bar0_window2; 516 512 u32 bar0_core2_window2; 513 + u32 bar2_offset; 517 514 518 515 unsigned long has_bar0_core2_window2 : 1; 519 516 unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1; ··· 840 835 struct bcm4377_data *bcm4377 = data; 841 836 u32 bootstage, rti_status; 842 837 843 - bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); 844 - rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); 838 + bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE); 839 + rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS); 845 840 846 841 if (bootstage != bcm4377->bootstage || 847 842 rti_status != bcm4377->rti_status) { ··· 1199 1194 return __bcm4378_send_calibration(bcm4377, 1200 1195 bcm4377->taurus_cal_blob, 
1201 1196 bcm4377->taurus_cal_size); 1197 + } 1198 + 1199 + static int bcm4388_send_calibration(struct bcm4377_data *bcm4377) 1200 + { 1201 + /* BCM4388 always uses beamforming */ 1202 + return __bcm4378_send_calibration( 1203 + bcm4377, bcm4377->taurus_beamforming_cal_blob, 1204 + bcm4377->taurus_beamforming_cal_size); 1202 1205 } 1203 1206 1204 1207 static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377, ··· 1832 1819 int ret = 0; 1833 1820 u32 bootstage, rti_status; 1834 1821 1835 - bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); 1836 - rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); 1822 + bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE); 1823 + rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS); 1837 1824 1838 1825 if (bootstage != 0) { 1839 1826 dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n", ··· 1867 1854 iowrite32(BCM4377_DMA_MASK, 1868 1855 bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE); 1869 1856 1870 - iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO); 1871 - iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI); 1872 - iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE); 1857 + iowrite32(lower_32_bits(fw_dma), 1858 + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_LO); 1859 + iowrite32(upper_32_bits(fw_dma), 1860 + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_HI); 1861 + iowrite32(fw->size, 1862 + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_SIZE); 1873 1863 iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL); 1874 1864 1875 1865 dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n"); 1876 1866 1877 1867 ret = wait_for_completion_interruptible_timeout(&bcm4377->event, 1878 - BCM4377_TIMEOUT); 1868 + BCM4377_BOOT_TIMEOUT); 1879 1869 if (ret == 0) { 1880 1870 ret = -ETIMEDOUT; 1881 1871 goto out_dma_free; ··· 1929 1913 
dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n"); 1930 1914 1931 1915 /* allow access to the entire IOVA space again */ 1932 - iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO); 1933 - iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI); 1916 + iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_LO); 1917 + iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_HI); 1934 1918 iowrite32(BCM4377_DMA_MASK, 1935 - bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE); 1919 + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_SIZE); 1936 1920 1937 1921 /* setup "Converged IPC" context */ 1938 1922 iowrite32(lower_32_bits(bcm4377->ctx_dma), 1939 - bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO); 1923 + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_LO); 1940 1924 iowrite32(upper_32_bits(bcm4377->ctx_dma), 1941 - bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI); 1925 + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_HI); 1942 1926 iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL); 1943 1927 1944 1928 ret = wait_for_completion_interruptible_timeout(&bcm4377->event, ··· 2504 2488 .send_calibration = bcm4387_send_calibration, 2505 2489 .send_ptb = bcm4378_send_ptb, 2506 2490 }, 2491 + 2492 + [BCM4388] = { 2493 + .id = 0x4388, 2494 + .otp_offset = 0x415c, 2495 + .bar2_offset = 0x200000, 2496 + .bar0_window1 = 0x18002000, 2497 + .bar0_window2 = 0x18109000, 2498 + .bar0_core2_window2 = 0x18106000, 2499 + .has_bar0_core2_window2 = true, 2500 + .broken_mws_transport_config = true, 2501 + .broken_le_coded = true, 2502 + .broken_le_ext_adv_report_phy = true, 2503 + .send_calibration = bcm4388_send_calibration, 2504 + .send_ptb = bcm4378_send_ptb, 2505 + }, 2507 2506 }; 2508 2507 2509 2508 #define BCM4377_DEVID_ENTRY(id) \ ··· 2532 2501 BCM4377_DEVID_ENTRY(4377), 2533 2502 BCM4377_DEVID_ENTRY(4378), 2534 2503 BCM4377_DEVID_ENTRY(4387), 2504 + 
BCM4377_DEVID_ENTRY(4388), 2535 2505 {}, 2536 2506 }; 2537 2507 MODULE_DEVICE_TABLE(pci, bcm4377_devid_table); ··· 2547 2515 module_pci_driver(bcm4377_pci_driver); 2548 2516 2549 2517 MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); 2550 - MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices"); 2518 + MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387/4388 devices"); 2551 2519 MODULE_LICENSE("Dual MIT/GPL"); 2552 2520 MODULE_FIRMWARE("brcm/brcmbt4377*.bin"); 2553 2521 MODULE_FIRMWARE("brcm/brcmbt4377*.ptb"); ··· 2555 2523 MODULE_FIRMWARE("brcm/brcmbt4378*.ptb"); 2556 2524 MODULE_FIRMWARE("brcm/brcmbt4387*.bin"); 2557 2525 MODULE_FIRMWARE("brcm/brcmbt4387*.ptb"); 2526 + MODULE_FIRMWARE("brcm/brcmbt4388*.bin"); 2527 + MODULE_FIRMWARE("brcm/brcmbt4388*.ptb");
+1 -1
drivers/bluetooth/hci_ldisc.c
··· 488 488 if (tty->ops->write == NULL) 489 489 return -EOPNOTSUPP; 490 490 491 - hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL); 491 + hu = kzalloc(sizeof(*hu), GFP_KERNEL); 492 492 if (!hu) { 493 493 BT_ERR("Can't allocate control structure"); 494 494 return -ENFILE;
-5
drivers/bluetooth/hci_nokia.c
··· 116 116 #define SETUP_BAUD_RATE 921600 117 117 #define INIT_BAUD_RATE 120000 118 118 119 - struct hci_nokia_radio_hdr { 120 - u8 evt; 121 - u8 dlen; 122 - } __packed; 123 - 124 119 struct nokia_bt_dev { 125 120 struct hci_uart hu; 126 121 struct serdev_device *serdev;
+98 -35
drivers/bluetooth/hci_qca.c
··· 28 28 #include <linux/of.h> 29 29 #include <linux/acpi.h> 30 30 #include <linux/platform_device.h> 31 + #include <linux/pwrseq/consumer.h> 31 32 #include <linux/regulator/consumer.h> 32 33 #include <linux/serdev.h> 33 34 #include <linux/mutex.h> ··· 215 214 struct regulator_bulk_data *vreg_bulk; 216 215 int num_vregs; 217 216 bool vregs_on; 217 + struct pwrseq_desc *pwrseq; 218 218 }; 219 219 220 220 struct qca_serdev { ··· 571 569 if (!hci_uart_has_flow_control(hu)) 572 570 return -EOPNOTSUPP; 573 571 574 - qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); 572 + qca = kzalloc(sizeof(*qca), GFP_KERNEL); 575 573 if (!qca) 576 574 return -ENOMEM; 577 575 ··· 1042 1040 } 1043 1041 1044 1042 if (!qca_memdump) { 1045 - qca_memdump = kzalloc(sizeof(struct qca_memdump_info), 1046 - GFP_ATOMIC); 1043 + qca_memdump = kzalloc(sizeof(*qca_memdump), GFP_ATOMIC); 1047 1044 if (!qca_memdump) { 1048 1045 mutex_unlock(&qca->hci_memdump_lock); 1049 1046 return; ··· 1686 1685 return wakeup; 1687 1686 } 1688 1687 1688 + static int qca_port_reopen(struct hci_uart *hu) 1689 + { 1690 + int ret; 1691 + 1692 + /* Now the device is in ready state to communicate with host. 1693 + * To sync host with device we need to reopen port. 1694 + * Without this, we will have RTS and CTS synchronization 1695 + * issues. 1696 + */ 1697 + serdev_device_close(hu->serdev); 1698 + ret = serdev_device_open(hu->serdev); 1699 + if (ret) { 1700 + bt_dev_err(hu->hdev, "failed to open port"); 1701 + return ret; 1702 + } 1703 + 1704 + hci_uart_set_flow_control(hu, false); 1705 + 1706 + return 0; 1707 + } 1708 + 1689 1709 static int qca_regulator_init(struct hci_uart *hu) 1690 1710 { 1691 1711 enum qca_btsoc_type soc_type = qca_soc_type(hu); ··· 1718 1696 * off the voltage regulator. 
1719 1697 */ 1720 1698 qcadev = serdev_device_get_drvdata(hu->serdev); 1699 + 1721 1700 if (!qcadev->bt_power->vregs_on) { 1722 1701 serdev_device_close(hu->serdev); 1723 1702 ret = qca_regulator_enable(qcadev); ··· 1776 1753 break; 1777 1754 } 1778 1755 1779 - /* Now the device is in ready state to communicate with host. 1780 - * To sync host with device we need to reopen port. 1781 - * Without this, we will have RTS and CTS synchronization 1782 - * issues. 1783 - */ 1784 - serdev_device_close(hu->serdev); 1785 - ret = serdev_device_open(hu->serdev); 1786 - if (ret) { 1787 - bt_dev_err(hu->hdev, "failed to open port"); 1788 - return ret; 1789 - } 1790 - 1791 - hci_uart_set_flow_control(hu, false); 1792 - 1793 - return 0; 1756 + return qca_port_reopen(hu); 1794 1757 } 1795 1758 1796 1759 static int qca_power_on(struct hci_dev *hdev) ··· 1801 1792 case QCA_WCN6750: 1802 1793 case QCA_WCN6855: 1803 1794 case QCA_WCN7850: 1795 + case QCA_QCA6390: 1804 1796 ret = qca_regulator_init(hu); 1805 1797 break; 1806 1798 ··· 2140 2130 unsigned long flags; 2141 2131 enum qca_btsoc_type soc_type = qca_soc_type(hu); 2142 2132 bool sw_ctrl_state; 2133 + struct qca_power *power; 2143 2134 2144 2135 /* From this point we go into power off state. 
But serial port is 2145 2136 * still open, stop queueing the IBS data and flush all the buffered ··· 2158 2147 return; 2159 2148 2160 2149 qcadev = serdev_device_get_drvdata(hu->serdev); 2150 + power = qcadev->bt_power; 2151 + 2152 + if (power->pwrseq) { 2153 + pwrseq_power_off(power->pwrseq); 2154 + set_bit(QCA_BT_OFF, &qca->flags); 2155 + return; 2156 + } 2161 2157 2162 2158 switch (soc_type) { 2163 2159 case QCA_WCN3988: ··· 2185 2167 sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl); 2186 2168 bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state); 2187 2169 } 2170 + break; 2171 + 2172 + case QCA_QCA6390: 2173 + pwrseq_power_off(qcadev->bt_power->pwrseq); 2188 2174 break; 2189 2175 2190 2176 default: ··· 2225 2203 { 2226 2204 struct qca_power *power = qcadev->bt_power; 2227 2205 int ret; 2206 + 2207 + if (power->pwrseq) 2208 + return pwrseq_power_on(power->pwrseq); 2228 2209 2229 2210 /* Already enabled */ 2230 2211 if (power->vregs_on) ··· 2297 2272 return 0; 2298 2273 } 2299 2274 2275 + static void qca_clk_disable_unprepare(void *data) 2276 + { 2277 + struct clk *clk = data; 2278 + 2279 + clk_disable_unprepare(clk); 2280 + } 2281 + 2300 2282 static int qca_serdev_probe(struct serdev_device *serdev) 2301 2283 { 2302 2284 struct qca_serdev *qcadev; ··· 2342 2310 case QCA_WCN6750: 2343 2311 case QCA_WCN6855: 2344 2312 case QCA_WCN7850: 2313 + case QCA_QCA6390: 2345 2314 qcadev->bt_power = devm_kzalloc(&serdev->dev, 2346 2315 sizeof(struct qca_power), 2347 2316 GFP_KERNEL); 2348 2317 if (!qcadev->bt_power) 2349 2318 return -ENOMEM; 2319 + break; 2320 + default: 2321 + break; 2322 + } 2350 2323 2324 + switch (qcadev->btsoc_type) { 2325 + case QCA_WCN6855: 2326 + case QCA_WCN7850: 2327 + if (!device_property_present(&serdev->dev, "enable-gpios")) { 2328 + /* 2329 + * Backward compatibility with old DT sources. If the 2330 + * node doesn't have the 'enable-gpios' property then 2331 + * let's use the power sequencer. 
Otherwise, let's 2332 + * drive everything outselves. 2333 + */ 2334 + qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, 2335 + "bluetooth"); 2336 + if (IS_ERR(qcadev->bt_power->pwrseq)) 2337 + return PTR_ERR(qcadev->bt_power->pwrseq); 2338 + 2339 + break; 2340 + } 2341 + fallthrough; 2342 + case QCA_WCN3988: 2343 + case QCA_WCN3990: 2344 + case QCA_WCN3991: 2345 + case QCA_WCN3998: 2346 + case QCA_WCN6750: 2351 2347 qcadev->bt_power->dev = &serdev->dev; 2352 2348 err = qca_init_regulators(qcadev->bt_power, data->vregs, 2353 2349 data->num_vregs); ··· 2413 2353 dev_err(&serdev->dev, "failed to acquire clk\n"); 2414 2354 return PTR_ERR(qcadev->susclk); 2415 2355 } 2356 + break; 2416 2357 2417 - err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); 2418 - if (err) { 2419 - BT_ERR("wcn3990 serdev registration failed"); 2420 - return err; 2421 - } 2358 + case QCA_QCA6390: 2359 + qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, 2360 + "bluetooth"); 2361 + if (IS_ERR(qcadev->bt_power->pwrseq)) 2362 + return PTR_ERR(qcadev->bt_power->pwrseq); 2422 2363 break; 2423 2364 2424 2365 default: ··· 2446 2385 if (err) 2447 2386 return err; 2448 2387 2449 - err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); 2450 - if (err) { 2451 - BT_ERR("Rome serdev registration failed"); 2452 - clk_disable_unprepare(qcadev->susclk); 2388 + err = devm_add_action_or_reset(&serdev->dev, 2389 + qca_clk_disable_unprepare, 2390 + qcadev->susclk); 2391 + if (err) 2453 2392 return err; 2454 - } 2393 + 2394 + } 2395 + 2396 + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); 2397 + if (err) { 2398 + BT_ERR("serdev registration failed"); 2399 + return err; 2455 2400 } 2456 2401 2457 2402 hdev = qcadev->serdev_hu.hdev; ··· 2495 2428 case QCA_WCN6750: 2496 2429 case QCA_WCN6855: 2497 2430 case QCA_WCN7850: 2498 - if (power->vregs_on) { 2431 + if (power->vregs_on) 2499 2432 qca_power_shutdown(&qcadev->serdev_hu); 2500 - break; 2501 - } 2502 - fallthrough; 
2503 - 2433 + break; 2504 2434 default: 2505 - if (qcadev->susclk) 2506 - clk_disable_unprepare(qcadev->susclk); 2435 + break; 2507 2436 } 2508 2437 2509 2438 hci_uart_unregister_device(&qcadev->serdev_hu);
+1 -1
drivers/bluetooth/hci_vhci.c
··· 633 633 { 634 634 struct vhci_data *data; 635 635 636 - data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL); 636 + data = kzalloc(sizeof(*data), GFP_KERNEL); 637 637 if (!data) 638 638 return -ENOMEM; 639 639
+1
drivers/power/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 source "drivers/power/reset/Kconfig" 3 + source "drivers/power/sequencing/Kconfig" 3 4 source "drivers/power/supply/Kconfig"
+1
drivers/power/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-$(CONFIG_POWER_RESET) += reset/ 3 + obj-$(CONFIG_POWER_SEQUENCING) += sequencing/ 3 4 obj-$(CONFIG_POWER_SUPPLY) += supply/
+29
drivers/power/sequencing/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + 3 + menuconfig POWER_SEQUENCING 4 + tristate "Power Sequencing support" 5 + help 6 + Say Y here to enable the Power Sequencing subsystem. 7 + 8 + This subsystem is designed to control power to devices that share 9 + complex resources and/or require specific power sequences to be run 10 + during power-up. 11 + 12 + If unsure, say no. 13 + 14 + if POWER_SEQUENCING 15 + 16 + config POWER_SEQUENCING_QCOM_WCN 17 + tristate "Qualcomm WCN family PMU driver" 18 + default m if ARCH_QCOM 19 + help 20 + Say Y here to enable the power sequencing driver for Qualcomm 21 + WCN Bluetooth/WLAN chipsets. 22 + 23 + Typically, a package from the Qualcomm WCN family contains the BT 24 + and WLAN modules whose power is controlled by the PMU module. As the 25 + former two share the power-up sequence which is executed by the PMU, 26 + this driver is needed for correct power control or else we'd risk not 27 + respecting the required delays between enabling Bluetooth and WLAN. 28 + 29 + endif
+6
drivers/power/sequencing/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + obj-$(CONFIG_POWER_SEQUENCING) += pwrseq-core.o 4 + pwrseq-core-y := core.o 5 + 6 + obj-$(CONFIG_POWER_SEQUENCING_QCOM_WCN) += pwrseq-qcom-wcn.o
+1105
drivers/power/sequencing/core.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2024 Linaro Ltd. 4 + */ 5 + 6 + #include <linux/bug.h> 7 + #include <linux/cleanup.h> 8 + #include <linux/debugfs.h> 9 + #include <linux/device.h> 10 + #include <linux/err.h> 11 + #include <linux/export.h> 12 + #include <linux/idr.h> 13 + #include <linux/kernel.h> 14 + #include <linux/kref.h> 15 + #include <linux/list.h> 16 + #include <linux/lockdep.h> 17 + #include <linux/module.h> 18 + #include <linux/mutex.h> 19 + #include <linux/property.h> 20 + #include <linux/pwrseq/consumer.h> 21 + #include <linux/pwrseq/provider.h> 22 + #include <linux/radix-tree.h> 23 + #include <linux/rwsem.h> 24 + #include <linux/slab.h> 25 + 26 + /* 27 + * Power-sequencing framework for linux. 28 + * 29 + * This subsystem allows power sequence providers to register a set of targets 30 + * that consumers may request and power-up/down. 31 + * 32 + * Glossary: 33 + * 34 + * Unit - a unit is a discreet chunk of a power sequence. For instance one unit 35 + * may enable a set of regulators, another may enable a specific GPIO. Units 36 + * can define dependencies in the form of other units that must be enabled 37 + * before it itself can be. 38 + * 39 + * Target - a target is a set of units (composed of the "final" unit and its 40 + * dependencies) that a consumer selects by its name when requesting a handle 41 + * to the power sequencer. Via the dependency system, multiple targets may 42 + * share the same parts of a power sequence but ignore parts that are 43 + * irrelevant. 44 + * 45 + * Descriptor - a handle passed by the pwrseq core to every consumer that 46 + * serves as the entry point to the provider layer. It ensures coherence 47 + * between different users and keeps reference counting consistent. 48 + * 49 + * Each provider must define a .match() callback whose role is to determine 50 + * whether a potential consumer is in fact associated with this sequencer. 
51 + * This allows creating abstraction layers on top of regular device-tree 52 + * resources like regulators, clocks and other nodes connected to the consumer 53 + * via phandle. 54 + */ 55 + 56 + static DEFINE_IDA(pwrseq_ida); 57 + 58 + /* 59 + * Protects the device list on the pwrseq bus from concurrent modifications 60 + * but allows simultaneous read-only access. 61 + */ 62 + static DECLARE_RWSEM(pwrseq_sem); 63 + 64 + /** 65 + * struct pwrseq_unit - Private power-sequence unit data. 66 + * @ref: Reference count for this object. When it goes to 0, the object is 67 + * destroyed. 68 + * @name: Name of this target. 69 + * @list: Link to siblings on the list of all units of a single sequencer. 70 + * @deps: List of units on which this unit depends. 71 + * @enable: Callback running the part of the power-on sequence provided by 72 + * this unit. 73 + * @disable: Callback running the part of the power-off sequence provided 74 + * by this unit. 75 + * @enable_count: Current number of users that enabled this unit. May be the 76 + * consumer of the power sequencer or other units that depend 77 + * on this one. 
78 + */ 79 + struct pwrseq_unit { 80 + struct kref ref; 81 + const char *name; 82 + struct list_head list; 83 + struct list_head deps; 84 + pwrseq_power_state_func enable; 85 + pwrseq_power_state_func disable; 86 + unsigned int enable_count; 87 + }; 88 + 89 + static struct pwrseq_unit *pwrseq_unit_new(const struct pwrseq_unit_data *data) 90 + { 91 + struct pwrseq_unit *unit; 92 + 93 + unit = kzalloc(sizeof(*unit), GFP_KERNEL); 94 + if (!unit) 95 + return NULL; 96 + 97 + unit->name = kstrdup_const(data->name, GFP_KERNEL); 98 + if (!unit->name) { 99 + kfree(unit); 100 + return NULL; 101 + } 102 + 103 + kref_init(&unit->ref); 104 + INIT_LIST_HEAD(&unit->deps); 105 + unit->enable = data->enable; 106 + unit->disable = data->disable; 107 + 108 + return unit; 109 + } 110 + 111 + static struct pwrseq_unit *pwrseq_unit_get(struct pwrseq_unit *unit) 112 + { 113 + kref_get(&unit->ref); 114 + 115 + return unit; 116 + } 117 + 118 + static void pwrseq_unit_release(struct kref *ref); 119 + 120 + static void pwrseq_unit_put(struct pwrseq_unit *unit) 121 + { 122 + kref_put(&unit->ref, pwrseq_unit_release); 123 + } 124 + 125 + /** 126 + * struct pwrseq_unit_dep - Wrapper around a reference to the unit structure 127 + * allowing to keep it on multiple dependency lists 128 + * in different units. 129 + * @list: Siblings on the list. 130 + * @unit: Address of the referenced unit. 
131 + */ 132 + struct pwrseq_unit_dep { 133 + struct list_head list; 134 + struct pwrseq_unit *unit; 135 + }; 136 + 137 + static struct pwrseq_unit_dep *pwrseq_unit_dep_new(struct pwrseq_unit *unit) 138 + { 139 + struct pwrseq_unit_dep *dep; 140 + 141 + dep = kzalloc(sizeof(*dep), GFP_KERNEL); 142 + if (!dep) 143 + return NULL; 144 + 145 + dep->unit = unit; 146 + 147 + return dep; 148 + } 149 + 150 + static void pwrseq_unit_dep_free(struct pwrseq_unit_dep *ref) 151 + { 152 + pwrseq_unit_put(ref->unit); 153 + kfree(ref); 154 + } 155 + 156 + static void pwrseq_unit_free_deps(struct list_head *list) 157 + { 158 + struct pwrseq_unit_dep *dep, *next; 159 + 160 + list_for_each_entry_safe(dep, next, list, list) { 161 + list_del(&dep->list); 162 + pwrseq_unit_dep_free(dep); 163 + } 164 + } 165 + 166 + static void pwrseq_unit_release(struct kref *ref) 167 + { 168 + struct pwrseq_unit *unit = container_of(ref, struct pwrseq_unit, ref); 169 + 170 + pwrseq_unit_free_deps(&unit->deps); 171 + list_del(&unit->list); 172 + kfree_const(unit->name); 173 + kfree(unit); 174 + } 175 + 176 + /** 177 + * struct pwrseq_target - Private power-sequence target data. 178 + * @list: Siblings on the list of all targets exposed by a power sequencer. 179 + * @name: Name of the target. 180 + * @unit: Final unit for this target. 181 + * @post_enable: Callback run after the target unit has been enabled, *after* 182 + * the state lock has been released. It's useful for implementing 183 + * boot-up delays without blocking other users from powering up 184 + * using the same power sequencer. 
185 + */ 186 + struct pwrseq_target { 187 + struct list_head list; 188 + const char *name; 189 + struct pwrseq_unit *unit; 190 + pwrseq_power_state_func post_enable; 191 + }; 192 + 193 + static struct pwrseq_target * 194 + pwrseq_target_new(const struct pwrseq_target_data *data) 195 + { 196 + struct pwrseq_target *target; 197 + 198 + target = kzalloc(sizeof(*target), GFP_KERNEL); 199 + if (!target) 200 + return NULL; 201 + 202 + target->name = kstrdup_const(data->name, GFP_KERNEL); 203 + if (!target->name) { 204 + kfree(target); 205 + return NULL; 206 + } 207 + 208 + target->post_enable = data->post_enable; 209 + 210 + return target; 211 + } 212 + 213 + static void pwrseq_target_free(struct pwrseq_target *target) 214 + { 215 + pwrseq_unit_put(target->unit); 216 + kfree_const(target->name); 217 + kfree(target); 218 + } 219 + 220 + /** 221 + * struct pwrseq_device - Private power sequencing data. 222 + * @dev: Device struct associated with this sequencer. 223 + * @id: Device ID. 224 + * @owner: Prevents removal of active power sequencing providers. 225 + * @rw_lock: Protects the device from being unregistered while in use. 226 + * @state_lock: Prevents multiple users running the power sequence at the same 227 + * time. 228 + * @match: Power sequencer matching callback. 229 + * @targets: List of targets exposed by this sequencer. 230 + * @units: List of all units supported by this sequencer. 
231 + */ 232 + struct pwrseq_device { 233 + struct device dev; 234 + int id; 235 + struct module *owner; 236 + struct rw_semaphore rw_lock; 237 + struct mutex state_lock; 238 + pwrseq_match_func match; 239 + struct list_head targets; 240 + struct list_head units; 241 + }; 242 + 243 + static struct pwrseq_device *to_pwrseq_device(struct device *dev) 244 + { 245 + return container_of(dev, struct pwrseq_device, dev); 246 + } 247 + 248 + static struct pwrseq_device *pwrseq_device_get(struct pwrseq_device *pwrseq) 249 + { 250 + get_device(&pwrseq->dev); 251 + 252 + return pwrseq; 253 + } 254 + 255 + static void pwrseq_device_put(struct pwrseq_device *pwrseq) 256 + { 257 + put_device(&pwrseq->dev); 258 + } 259 + 260 + /** 261 + * struct pwrseq_desc - Wraps access to the pwrseq_device and ensures that one 262 + * user cannot break the reference counting for others. 263 + * @pwrseq: Reference to the power sequencing device. 264 + * @target: Reference to the target this descriptor allows to control. 265 + * @powered_on: Power state set by the holder of the descriptor (not necessarily 266 + * corresponding to the actual power state of the device). 
 */
struct pwrseq_desc {
	struct pwrseq_device *pwrseq;
	struct pwrseq_target *target;
	bool powered_on;
};

static const struct bus_type pwrseq_bus = {
	.name = "pwrseq",
};

/* Device release callback: frees everything allocated at registration time. */
static void pwrseq_release(struct device *dev)
{
	struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
	struct pwrseq_target *target, *pos;

	list_for_each_entry_safe(target, pos, &pwrseq->targets, list) {
		list_del(&target->list);
		pwrseq_target_free(target);
	}

	mutex_destroy(&pwrseq->state_lock);
	ida_free(&pwrseq_ida, pwrseq->id);
	kfree(pwrseq);
}

static const struct device_type pwrseq_device_type = {
	.name = "power_sequencer",
	.release = pwrseq_release,
};

/*
 * Recursively walk the unit dependency graph, recording each visited unit in
 * the radix tree keyed by its data pointer. Seeing an already-visited unit
 * among the deps means the provider described a dependency cycle.
 */
static int pwrseq_check_unit_deps(const struct pwrseq_unit_data *data,
				  struct radix_tree_root *visited_units)
{
	const struct pwrseq_unit_data *tmp, **cur;
	int ret;

	ret = radix_tree_insert(visited_units, (unsigned long)data,
				(void *)data);
	if (ret)
		return ret;

	for (cur = data->deps; cur && *cur; cur++) {
		tmp = radix_tree_lookup(visited_units, (unsigned long)*cur);
		if (tmp) {
			WARN(1, "Circular dependency in power sequencing flow detected!\n");
			return -EINVAL;
		}

		ret = pwrseq_check_unit_deps(*cur, visited_units);
		if (ret)
			return ret;
	}

	return 0;
}

/* Verify the target has a unit and that its dependency graph is acyclic. */
static int pwrseq_check_target_deps(const struct pwrseq_target_data *data)
{
	struct radix_tree_root visited_units;
	struct radix_tree_iter iter;
	void __rcu **slot;
	int ret;

	if (!data->unit)
		return -EINVAL;

	INIT_RADIX_TREE(&visited_units, GFP_KERNEL);
	ret = pwrseq_check_unit_deps(data->unit, &visited_units);
	/* The tree only holds borrowed pointers; just empty it. */
	radix_tree_for_each_slot(slot, &visited_units, &iter, 0)
		radix_tree_delete(&visited_units, iter.index);

	return ret;
}

static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
				  struct list_head *dep_list,
				  struct list_head *unit_list,
				  struct radix_tree_root *processed_units);

/*
 * Create (or reuse, via the processed_units cache) the runtime unit object
 * for the given unit data, recursively setting up its dependencies first.
 * Returns a referenced unit or ERR_PTR().
 */
static struct pwrseq_unit *
pwrseq_unit_setup(const struct pwrseq_unit_data *data,
		  struct list_head *unit_list,
		  struct radix_tree_root *processed_units)
{
	struct pwrseq_unit *unit;
	int ret;

	unit = radix_tree_lookup(processed_units, (unsigned long)data);
	if (unit)
		return pwrseq_unit_get(unit);

	unit = pwrseq_unit_new(data);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	if (data->deps) {
		ret = pwrseq_unit_setup_deps(data->deps, &unit->deps,
					     unit_list, processed_units);
		if (ret) {
			pwrseq_unit_put(unit);
			return ERR_PTR(ret);
		}
	}

	ret = radix_tree_insert(processed_units, (unsigned long)data, unit);
	if (ret) {
		pwrseq_unit_put(unit);
		return ERR_PTR(ret);
	}

	list_add_tail(&unit->list, unit_list);

	return unit;
}

/*
 * Set up every unit in the NULL-terminated data array and link a dependency
 * entry for each onto dep_list.
 */
static int pwrseq_unit_setup_deps(const struct pwrseq_unit_data **data,
				  struct list_head *dep_list,
				  struct list_head *unit_list,
				  struct radix_tree_root *processed_units)
{
	const struct pwrseq_unit_data *pos;
	struct pwrseq_unit_dep *dep;
	struct pwrseq_unit *unit;
	int i;

	for (i = 0; data[i]; i++) {
		pos = data[i];

		unit = pwrseq_unit_setup(pos, unit_list, processed_units);
		if (IS_ERR(unit))
			return PTR_ERR(unit);

		dep = pwrseq_unit_dep_new(unit);
		if (!dep) {
			pwrseq_unit_put(unit);
			return -ENOMEM;
		}

		list_add_tail(&dep->list, dep_list);
	}

	return 0;
}

static int pwrseq_do_setup_targets(const struct pwrseq_target_data
**data,
				   struct pwrseq_device *pwrseq,
				   struct radix_tree_root *processed_units)
{
	const struct pwrseq_target_data *pos;
	struct pwrseq_target *target;
	int ret, i;

	for (i = 0; data[i]; i++) {
		pos = data[i];

		/* Reject targets with missing units or circular deps. */
		ret = pwrseq_check_target_deps(pos);
		if (ret)
			return ret;

		target = pwrseq_target_new(pos);
		if (!target)
			return -ENOMEM;

		target->unit = pwrseq_unit_setup(pos->unit, &pwrseq->units,
						 processed_units);
		if (IS_ERR(target->unit)) {
			ret = PTR_ERR(target->unit);
			pwrseq_target_free(target);
			return ret;
		}

		/* On error paths the device release() frees these targets. */
		list_add_tail(&target->list, &pwrseq->targets);
	}

	return 0;
}

/*
 * Build the runtime target/unit objects from the provider's config. The
 * radix tree deduplicates units shared between targets and is emptied
 * before returning (the units themselves live on pwrseq->units).
 */
static int pwrseq_setup_targets(const struct pwrseq_target_data **targets,
				struct pwrseq_device *pwrseq)
{
	struct radix_tree_root processed_units;
	struct radix_tree_iter iter;
	void __rcu **slot;
	int ret;

	INIT_RADIX_TREE(&processed_units, GFP_KERNEL);
	ret = pwrseq_do_setup_targets(targets, pwrseq, &processed_units);
	radix_tree_for_each_slot(slot, &processed_units, &iter, 0)
		radix_tree_delete(&processed_units, iter.index);

	return ret;
}

/**
 * pwrseq_device_register() - Register a new power sequencer.
 * @config: Configuration of the new power sequencing device.
 *
 * The config structure is only used during the call and can be freed after
 * the function returns. The config structure *must* have the parent device
 * as well as the match() callback and at least one target set.
 *
 * Returns:
 * Returns the address of the new pwrseq device or ERR_PTR() on failure.
 */
struct pwrseq_device *
pwrseq_device_register(const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret, id;

	if (!config->parent || !config->match || !config->targets ||
	    !config->targets[0])
		return ERR_PTR(-EINVAL);

	pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
	if (!pwrseq)
		return ERR_PTR(-ENOMEM);

	pwrseq->dev.type = &pwrseq_device_type;
	pwrseq->dev.bus = &pwrseq_bus;
	pwrseq->dev.parent = config->parent;
	device_set_node(&pwrseq->dev, dev_fwnode(config->parent));
	dev_set_drvdata(&pwrseq->dev, config->drvdata);

	id = ida_alloc(&pwrseq_ida, GFP_KERNEL);
	if (id < 0) {
		kfree(pwrseq);
		return ERR_PTR(id);
	}

	pwrseq->id = id;

	/*
	 * From this point onwards the device's release() callback is
	 * responsible for freeing resources.
	 */
	device_initialize(&pwrseq->dev);

	ret = dev_set_name(&pwrseq->dev, "pwrseq.%d", pwrseq->id);
	if (ret)
		goto err_put_pwrseq;

	pwrseq->owner = config->owner ?: THIS_MODULE;
	pwrseq->match = config->match;

	init_rwsem(&pwrseq->rw_lock);
	mutex_init(&pwrseq->state_lock);
	INIT_LIST_HEAD(&pwrseq->targets);
	INIT_LIST_HEAD(&pwrseq->units);

	ret = pwrseq_setup_targets(config->targets, pwrseq);
	if (ret)
		goto err_put_pwrseq;

	/* pwrseq_sem serializes registration against consumer lookups. */
	scoped_guard(rwsem_write, &pwrseq_sem) {
		ret = device_add(&pwrseq->dev);
		if (ret)
			goto err_put_pwrseq;
	}

	return pwrseq;

err_put_pwrseq:
	pwrseq_device_put(pwrseq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pwrseq_device_register);

/**
 * pwrseq_device_unregister() - Unregister the power sequencer.
 * @pwrseq: Power sequencer to unregister.
 */
void pwrseq_device_unregister(struct pwrseq_device *pwrseq)
{
	struct device *dev = &pwrseq->dev;
	struct pwrseq_target *target;

	scoped_guard(mutex, &pwrseq->state_lock) {
		/* Taking rw_lock for writing waits out all active users. */
		guard(rwsem_write)(&pwrseq->rw_lock);

		list_for_each_entry(target, &pwrseq->targets, list)
			WARN(target->unit->enable_count,
			     "REMOVING POWER SEQUENCER WITH ACTIVE USERS\n");

		guard(rwsem_write)(&pwrseq_sem);

		device_del(dev);
	}

	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_device_unregister);

/* devm action callback for devm_pwrseq_device_register(). */
static void devm_pwrseq_device_unregister(void *data)
{
	struct pwrseq_device *pwrseq = data;

	pwrseq_device_unregister(pwrseq);
}

/**
 * devm_pwrseq_device_register() - Managed variant of pwrseq_device_register().
 * @dev: Managing device.
 * @config: Configuration of the new power sequencing device.
 *
 * Returns:
 * Returns the address of the new pwrseq device or ERR_PTR() on failure.
 */
struct pwrseq_device *
devm_pwrseq_device_register(struct device *dev,
			    const struct pwrseq_config *config)
{
	struct pwrseq_device *pwrseq;
	int ret;

	pwrseq = pwrseq_device_register(config);
	if (IS_ERR(pwrseq))
		return pwrseq;

	ret = devm_add_action_or_reset(dev, devm_pwrseq_device_unregister,
				       pwrseq);
	if (ret)
		return ERR_PTR(ret);

	return pwrseq;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_device_register);

/**
 * pwrseq_device_get_drvdata() - Get the driver private data associated with
 *                               this sequencer.
 * @pwrseq: Power sequencer object.
 *
 * Returns:
 * Address of the private driver data.
603 + */ 604 + void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq) 605 + { 606 + return dev_get_drvdata(&pwrseq->dev); 607 + } 608 + EXPORT_SYMBOL_GPL(pwrseq_device_get_drvdata); 609 + 610 + struct pwrseq_match_data { 611 + struct pwrseq_desc *desc; 612 + struct device *dev; 613 + const char *target; 614 + }; 615 + 616 + static int pwrseq_match_device(struct device *pwrseq_dev, void *data) 617 + { 618 + struct pwrseq_device *pwrseq = to_pwrseq_device(pwrseq_dev); 619 + struct pwrseq_match_data *match_data = data; 620 + struct pwrseq_target *target; 621 + int ret; 622 + 623 + lockdep_assert_held_read(&pwrseq_sem); 624 + 625 + guard(rwsem_read)(&pwrseq->rw_lock); 626 + if (!device_is_registered(&pwrseq->dev)) 627 + return 0; 628 + 629 + ret = pwrseq->match(pwrseq, match_data->dev); 630 + if (ret <= 0) 631 + return ret; 632 + 633 + /* We got the matching device, let's find the right target. */ 634 + list_for_each_entry(target, &pwrseq->targets, list) { 635 + if (strcmp(target->name, match_data->target)) 636 + continue; 637 + 638 + match_data->desc->target = target; 639 + } 640 + 641 + /* 642 + * This device does not have this target. No point in deferring as it 643 + * will not get a new target dynamically later. 644 + */ 645 + if (!match_data->desc->target) 646 + return -ENOENT; 647 + 648 + if (!try_module_get(pwrseq->owner)) 649 + return -EPROBE_DEFER; 650 + 651 + match_data->desc->pwrseq = pwrseq_device_get(pwrseq); 652 + 653 + return 1; 654 + } 655 + 656 + /** 657 + * pwrseq_get() - Get the power sequencer associated with this device. 658 + * @dev: Device for which to get the sequencer. 659 + * @target: Name of the target exposed by the sequencer this device wants to 660 + * reach. 661 + * 662 + * Returns: 663 + * New power sequencer descriptor for use by the consumer driver or ERR_PTR() 664 + * on failure. 
 */
struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_match_data match_data;
	int ret;

	/* __free(kfree) releases the desc automatically on error returns. */
	struct pwrseq_desc *desc __free(kfree) = kzalloc(sizeof(*desc),
							 GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	match_data.desc = desc;
	match_data.dev = dev;
	match_data.target = target;

	guard(rwsem_read)(&pwrseq_sem);

	ret = bus_for_each_dev(&pwrseq_bus, NULL, &match_data,
			       pwrseq_match_device);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret == 0)
		/* No device matched. */
		return ERR_PTR(-EPROBE_DEFER);

	return no_free_ptr(desc);
}
EXPORT_SYMBOL_GPL(pwrseq_get);

/**
 * pwrseq_put() - Release the power sequencer descriptor.
 * @desc: Descriptor to release.
 */
void pwrseq_put(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;

	if (!desc)
		return;

	pwrseq = desc->pwrseq;

	/* Undo any outstanding power-on issued through this descriptor. */
	if (desc->powered_on)
		pwrseq_power_off(desc);

	kfree(desc);
	module_put(pwrseq->owner);
	pwrseq_device_put(pwrseq);
}
EXPORT_SYMBOL_GPL(pwrseq_put);

/* devm action callback for devm_pwrseq_get(). */
static void devm_pwrseq_put(void *data)
{
	struct pwrseq_desc *desc = data;

	pwrseq_put(desc);
}

/**
 * devm_pwrseq_get() - Managed variant of pwrseq_get().
 * @dev: Device for which to get the sequencer and which also manages its
 *       lifetime.
 * @target: Name of the target exposed by the sequencer this device wants to
 *          reach.
 *
 * Returns:
 * New power sequencer descriptor for use by the consumer driver or ERR_PTR()
 * on failure.
 */
struct pwrseq_desc *devm_pwrseq_get(struct device *dev, const char *target)
{
	struct pwrseq_desc *desc;
	int ret;

	desc = pwrseq_get(dev, target);
	if (IS_ERR(desc))
		return desc;

	ret = devm_add_action_or_reset(dev, devm_pwrseq_put, desc);
	if (ret)
		return ERR_PTR(ret);

	return desc;
}
EXPORT_SYMBOL_GPL(devm_pwrseq_get);

static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
			      struct pwrseq_unit *target);
static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
			       struct pwrseq_unit *target);

/*
 * Enable every dependency on the list in order; on failure, roll back the
 * ones already enabled (in reverse) and return the error.
 */
static int pwrseq_unit_enable_deps(struct pwrseq_device *pwrseq,
				   struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry(pos, list, list) {
		ret = pwrseq_unit_enable(pwrseq, pos->unit);
		if (ret) {
			list_for_each_entry_continue_reverse(pos, list, list)
				pwrseq_unit_disable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}

/*
 * Disable dependencies in reverse order; on failure, re-enable the ones
 * already disabled to restore the previous state.
 */
static int pwrseq_unit_disable_deps(struct pwrseq_device *pwrseq,
				    struct list_head *list)
{
	struct pwrseq_unit_dep *pos;
	int ret = 0;

	list_for_each_entry_reverse(pos, list, list) {
		ret = pwrseq_unit_disable(pwrseq, pos->unit);
		if (ret) {
			list_for_each_entry_continue(pos, list, list)
				pwrseq_unit_enable(pwrseq, pos->unit);
			break;
		}
	}

	return ret;
}

/*
 * Reference-counted unit enable: only the 0 -> 1 transition enables the
 * dependencies and calls the unit's enable() callback. Caller must hold
 * rw_lock (read) and state_lock.
 */
static int pwrseq_unit_enable(struct pwrseq_device *pwrseq,
			      struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	if (unit->enable_count != 0) {
		unit->enable_count++;
		return 0;
	}

	ret = pwrseq_unit_enable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to enable dependencies before power-on for target '%s': %d\n",
			unit->name, ret);
		return ret;
	}

	if (unit->enable) {
		ret = unit->enable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to enable target '%s': %d\n",
				unit->name, ret);
			pwrseq_unit_disable_deps(pwrseq, &unit->deps);
			return ret;
		}
	}

	unit->enable_count++;

	return 0;
}

/*
 * Reference-counted unit disable: only the 1 -> 0 transition calls the
 * unit's disable() callback and tears down the dependencies. Caller must
 * hold rw_lock (read) and state_lock.
 */
static int pwrseq_unit_disable(struct pwrseq_device *pwrseq,
			       struct pwrseq_unit *unit)
{
	int ret;

	lockdep_assert_held_read(&pwrseq->rw_lock);
	lockdep_assert_held(&pwrseq->state_lock);

	if (unit->enable_count == 0) {
		WARN(1, "Unmatched power-off for target '%s'\n",
		     unit->name);
		return -EBUSY;
	}

	if (unit->enable_count != 1) {
		unit->enable_count--;
		return 0;
	}

	if (unit->disable) {
		ret = unit->disable(pwrseq);
		if (ret) {
			dev_err(&pwrseq->dev,
				"Failed to disable target '%s': %d\n",
				unit->name, ret);
			return ret;
		}
	}

	ret = pwrseq_unit_disable_deps(pwrseq, &unit->deps);
	if (ret) {
		dev_err(&pwrseq->dev,
			"Failed to disable dependencies after power-off for target '%s': %d\n",
			unit->name, ret);
		/* Best-effort attempt at restoring the previous state. */
		if (unit->enable)
			unit->enable(pwrseq);
		return ret;
	}

	unit->enable_count--;

	return 0;
}

/**
 * pwrseq_power_on() - Issue a power-on request on behalf of the consumer
 *                     device.
 * @desc: Descriptor referencing the power sequencer.
 *
 * This function tells the power sequencer that the consumer wants to be
 * powered-up. The sequencer may already have powered-up the device in which
 * case the function returns 0. If the power-up sequence is already in
 * progress, the function will block until it's done and return 0.
If this is 882 + * the first request, the device will be powered up. 883 + * 884 + * Returns: 885 + * 0 on success, negative error number on failure. 886 + */ 887 + int pwrseq_power_on(struct pwrseq_desc *desc) 888 + { 889 + struct pwrseq_device *pwrseq; 890 + struct pwrseq_target *target; 891 + struct pwrseq_unit *unit; 892 + int ret; 893 + 894 + might_sleep(); 895 + 896 + if (!desc || desc->powered_on) 897 + return 0; 898 + 899 + pwrseq = desc->pwrseq; 900 + target = desc->target; 901 + unit = target->unit; 902 + 903 + guard(rwsem_read)(&pwrseq->rw_lock); 904 + if (!device_is_registered(&pwrseq->dev)) 905 + return -ENODEV; 906 + 907 + scoped_guard(mutex, &pwrseq->state_lock) { 908 + ret = pwrseq_unit_enable(pwrseq, unit); 909 + if (!ret) 910 + desc->powered_on = true; 911 + } 912 + 913 + if (target->post_enable) { 914 + ret = target->post_enable(pwrseq); 915 + if (ret) { 916 + pwrseq_unit_disable(pwrseq, unit); 917 + desc->powered_on = false; 918 + } 919 + } 920 + 921 + return ret; 922 + } 923 + EXPORT_SYMBOL_GPL(pwrseq_power_on); 924 + 925 + /** 926 + * pwrseq_power_off() - Issue a power-off request on behalf of the consumer 927 + * device. 928 + * @desc: Descriptor referencing the power sequencer. 929 + * 930 + * This undoes the effects of pwrseq_power_on(). It issues a power-off request 931 + * on behalf of the consumer and when the last remaining user does so, the 932 + * power-down sequence will be started. If one is in progress, the function 933 + * will block until it's complete and then return. 934 + * 935 + * Returns: 936 + * 0 on success, negative error number on failure. 
 */
int pwrseq_power_off(struct pwrseq_desc *desc)
{
	struct pwrseq_device *pwrseq;
	struct pwrseq_unit *unit;
	int ret;

	might_sleep();

	if (!desc || !desc->powered_on)
		return 0;

	pwrseq = desc->pwrseq;
	unit = desc->target->unit;

	guard(rwsem_read)(&pwrseq->rw_lock);
	if (!device_is_registered(&pwrseq->dev))
		return -ENODEV;

	guard(mutex)(&pwrseq->state_lock);

	ret = pwrseq_unit_disable(pwrseq, unit);
	if (!ret)
		desc->powered_on = false;

	return ret;
}
EXPORT_SYMBOL_GPL(pwrseq_power_off);

#if IS_ENABLED(CONFIG_DEBUG_FS)

/* Used to find the n-th sequencer on the bus for the seq_file iterator. */
struct pwrseq_debugfs_count_ctx {
	struct device *dev;
	loff_t index;
};

/* bus_for_each_dev() callback: stop once we've skipped 'index' devices. */
static int pwrseq_debugfs_seq_count(struct device *dev, void *data)
{
	struct pwrseq_debugfs_count_ctx *ctx = data;

	ctx->dev = dev;

	return ctx->index-- ? 0 : 1;
}

static void *pwrseq_debugfs_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct pwrseq_debugfs_count_ctx ctx;

	ctx.dev = NULL;
	ctx.index = *pos;

	/*
	 * We're holding the lock for the entire printout so no need to fiddle
	 * with device reference count.
	 */
	down_read(&pwrseq_sem);

	bus_for_each_dev(&pwrseq_bus, NULL, &ctx, pwrseq_debugfs_seq_count);
	if (!ctx.index)
		return NULL;

	return ctx.dev;
}

static void *pwrseq_debugfs_seq_next(struct seq_file *seq, void *data,
				     loff_t *pos)
{
	struct device *curr = data;

	++*pos;

	/*
	 * The reference taken by bus_find_next_device() is dropped right
	 * away; pwrseq_sem (held since start()) keeps the device alive.
	 */
	struct device *next __free(put_device) =
		bus_find_next_device(&pwrseq_bus, curr);
	return next;
}

static void pwrseq_debugfs_seq_show_target(struct seq_file *seq,
					   struct pwrseq_target *target)
{
	seq_printf(seq, "  target: [%s] (target unit: [%s])\n",
		   target->name, target->unit->name);
}

static void pwrseq_debugfs_seq_show_unit(struct seq_file *seq,
					 struct pwrseq_unit *unit)
{
	struct pwrseq_unit_dep *ref;

	seq_printf(seq, "  unit: [%s] - enable count: %u\n",
		   unit->name, unit->enable_count);

	if (list_empty(&unit->deps))
		return;

	seq_puts(seq, "    dependencies:\n");
	list_for_each_entry(ref, &unit->deps, list)
		seq_printf(seq, "      [%s]\n", ref->unit->name);
}

static int pwrseq_debugfs_seq_show(struct seq_file *seq, void *data)
{
	struct device *dev = data;
	struct pwrseq_device *pwrseq = to_pwrseq_device(dev);
	struct pwrseq_target *target;
	struct pwrseq_unit *unit;

	seq_printf(seq, "%s:\n", dev_name(dev));

	seq_puts(seq, "  targets:\n");
	list_for_each_entry(target, &pwrseq->targets, list)
		pwrseq_debugfs_seq_show_target(seq, target);

	seq_puts(seq, "  units:\n");
	list_for_each_entry(unit, &pwrseq->units, list)
		pwrseq_debugfs_seq_show_unit(seq, unit);

	return 0;
}

static void pwrseq_debugfs_seq_stop(struct seq_file *seq, void *data)
{
	up_read(&pwrseq_sem);
}

static const struct seq_operations pwrseq_debugfs_sops = {
	.start = pwrseq_debugfs_seq_start,
	.next = pwrseq_debugfs_seq_next,
	.show = pwrseq_debugfs_seq_show,
	.stop = pwrseq_debugfs_seq_stop,
};
DEFINE_SEQ_ATTRIBUTE(pwrseq_debugfs);

static struct dentry *pwrseq_debugfs_dentry;

#endif /* CONFIG_DEBUG_FS */

/* Register the pwrseq bus early so providers can probe at device_initcall. */
static int __init pwrseq_init(void)
{
	int ret;

	ret = bus_register(&pwrseq_bus);
	if (ret) {
		pr_err("Failed to register the power sequencer bus\n");
		return ret;
	}

#if IS_ENABLED(CONFIG_DEBUG_FS)
	pwrseq_debugfs_dentry = debugfs_create_file("pwrseq", 0444, NULL, NULL,
						    &pwrseq_debugfs_fops);
#endif /* CONFIG_DEBUG_FS */

	return 0;
}
subsys_initcall(pwrseq_init);

static void __exit pwrseq_exit(void)
{
#if IS_ENABLED(CONFIG_DEBUG_FS)
	debugfs_remove_recursive(pwrseq_debugfs_dentry);
#endif /* CONFIG_DEBUG_FS */

	bus_unregister(&pwrseq_bus);
}
module_exit(pwrseq_exit);

MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_DESCRIPTION("Power Sequencing subsystem core");
MODULE_LICENSE("GPL");
+336
drivers/power/sequencing/pwrseq-qcom-wcn.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Linaro Ltd.
 *
 * Power sequencing provider for the PMU module found in Qualcomm WCN
 * Bluetooth/WLAN chipsets (QCA6390, WCN7850). Exposes the "bluetooth" and
 * "wlan" targets, both depending on the shared regulators and reference
 * clock.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pwrseq/provider.h>
#include <linux/string.h>
#include <linux/types.h>

struct pwrseq_qcom_wcn_pdata {
	const char *const *vregs;
	size_t num_vregs;
	unsigned int pwup_delay_ms;
	unsigned int gpio_enable_delay_ms;
};

struct pwrseq_qcom_wcn_ctx {
	struct pwrseq_device *pwrseq;
	struct device_node *of_node;
	const struct pwrseq_qcom_wcn_pdata *pdata;
	struct regulator_bulk_data *regs;
	struct gpio_desc *bt_gpio;
	struct gpio_desc *wlan_gpio;
	struct clk *clk;
	/* jiffies timestamp of the last enable-GPIO assertion. */
	unsigned long last_gpio_enable_jf;
};

/*
 * Enforce the minimum spacing (gpio_enable_delay_ms) between consecutive
 * assertions of the BT/WLAN enable GPIOs by sleeping for the remainder.
 */
static void pwrseq_qcom_wcn_ensure_gpio_delay(struct pwrseq_qcom_wcn_ctx *ctx)
{
	unsigned long diff_jiffies;
	unsigned int diff_msecs;

	if (!ctx->pdata->gpio_enable_delay_ms)
		return;

	diff_jiffies = jiffies - ctx->last_gpio_enable_jf;
	diff_msecs = jiffies_to_msecs(diff_jiffies);

	if (diff_msecs < ctx->pdata->gpio_enable_delay_ms)
		msleep(ctx->pdata->gpio_enable_delay_ms - diff_msecs);
}

static int pwrseq_qcom_wcn_vregs_enable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	return regulator_bulk_enable(ctx->pdata->num_vregs, ctx->regs);
}

static int pwrseq_qcom_wcn_vregs_disable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	return regulator_bulk_disable(ctx->pdata->num_vregs, ctx->regs);
}

static const struct pwrseq_unit_data pwrseq_qcom_wcn_vregs_unit_data = {
	.name = "regulators-enable",
	.enable = pwrseq_qcom_wcn_vregs_enable,
	.disable = pwrseq_qcom_wcn_vregs_disable,
};

static int pwrseq_qcom_wcn_clk_enable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	return clk_prepare_enable(ctx->clk);
}

static int pwrseq_qcom_wcn_clk_disable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	clk_disable_unprepare(ctx->clk);

	return 0;
}

static const struct pwrseq_unit_data pwrseq_qcom_wcn_clk_unit_data = {
	.name = "clock-enable",
	.enable = pwrseq_qcom_wcn_clk_enable,
	.disable = pwrseq_qcom_wcn_clk_disable,
};

/* Both the BT and WLAN units depend on the regulators and the clock. */
static const struct pwrseq_unit_data *pwrseq_qcom_wcn_unit_deps[] = {
	&pwrseq_qcom_wcn_vregs_unit_data,
	&pwrseq_qcom_wcn_clk_unit_data,
	NULL
};

static int pwrseq_qcom_wcn_bt_enable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	pwrseq_qcom_wcn_ensure_gpio_delay(ctx);
	gpiod_set_value_cansleep(ctx->bt_gpio, 1);
	ctx->last_gpio_enable_jf = jiffies;

	return 0;
}

static int pwrseq_qcom_wcn_bt_disable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	gpiod_set_value_cansleep(ctx->bt_gpio, 0);

	return 0;
}

static const struct pwrseq_unit_data pwrseq_qcom_wcn_bt_unit_data = {
	.name = "bluetooth-enable",
	.deps = pwrseq_qcom_wcn_unit_deps,
	.enable = pwrseq_qcom_wcn_bt_enable,
	.disable = pwrseq_qcom_wcn_bt_disable,
};

static int pwrseq_qcom_wcn_wlan_enable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	pwrseq_qcom_wcn_ensure_gpio_delay(ctx);
	gpiod_set_value_cansleep(ctx->wlan_gpio, 1);
	ctx->last_gpio_enable_jf = jiffies;

	return 0;
}

static int pwrseq_qcom_wcn_wlan_disable(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	gpiod_set_value_cansleep(ctx->wlan_gpio, 0);

	return 0;
}

static const struct pwrseq_unit_data pwrseq_qcom_wcn_wlan_unit_data = {
	.name = "wlan-enable",
	.deps = pwrseq_qcom_wcn_unit_deps,
	.enable = pwrseq_qcom_wcn_wlan_enable,
	.disable = pwrseq_qcom_wcn_wlan_disable,
};

/* Post-enable delay giving the module time to power up (pwup_delay_ms). */
static int pwrseq_qcom_wcn_pwup_delay(struct pwrseq_device *pwrseq)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	if (ctx->pdata->pwup_delay_ms)
		msleep(ctx->pdata->pwup_delay_ms);

	return 0;
}

static const struct pwrseq_target_data pwrseq_qcom_wcn_bt_target_data = {
	.name = "bluetooth",
	.unit = &pwrseq_qcom_wcn_bt_unit_data,
	.post_enable = pwrseq_qcom_wcn_pwup_delay,
};

static const struct pwrseq_target_data pwrseq_qcom_wcn_wlan_target_data = {
	.name = "wlan",
	.unit = &pwrseq_qcom_wcn_wlan_unit_data,
	.post_enable = pwrseq_qcom_wcn_pwup_delay,
};

static const struct pwrseq_target_data *pwrseq_qcom_wcn_targets[] = {
	&pwrseq_qcom_wcn_bt_target_data,
	&pwrseq_qcom_wcn_wlan_target_data,
	NULL
};

static const char *const pwrseq_qca6390_vregs[] = {
	"vddio",
	"vddaon",
	"vddpmu",
	"vddrfa0p95",
	"vddrfa1p3",
	"vddrfa1p9",
	"vddpcie1p3",
	"vddpcie1p9",
};

static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = {
	.vregs = pwrseq_qca6390_vregs,
	.num_vregs = ARRAY_SIZE(pwrseq_qca6390_vregs),
	.pwup_delay_ms = 60,
	.gpio_enable_delay_ms = 100,
};

static const char *const pwrseq_wcn7850_vregs[] = {
	"vdd",
	"vddio",
	"vddio1p2",
	"vddaon",
	"vdddig",
	"vddrfa1p2",
	"vddrfa1p8",
};

static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn7850_of_data = {
	.vregs = pwrseq_wcn7850_vregs,
	.num_vregs = ARRAY_SIZE(pwrseq_wcn7850_vregs),
	.pwup_delay_ms = 50,
};

static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq,
				 struct device *dev)
{
	struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
	struct device_node *dev_node = dev->of_node;

	/*
	 * The PMU supplies power to the Bluetooth and WLAN modules. both
	 * consume the PMU AON output so check the presence of the
	 * 'vddaon-supply' property and whether it leads us to the right
	 * device.
	 */
	if (!of_property_present(dev_node, "vddaon-supply"))
		return 0;

	struct device_node *reg_node __free(device_node) =
			of_parse_phandle(dev_node, "vddaon-supply", 0);
	if (!reg_node)
		return 0;

	/*
	 * `reg_node` is the PMU AON regulator, its parent is the `regulators`
	 * node and finally its grandparent is the PMU device node that we're
	 * looking for.
	 */
	if (!reg_node->parent || !reg_node->parent->parent ||
	    reg_node->parent->parent != ctx->of_node)
		return 0;

	return 1;
}

static int pwrseq_qcom_wcn_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pwrseq_qcom_wcn_ctx *ctx;
	struct pwrseq_config config;
	int i, ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->of_node = dev->of_node;

	ctx->pdata = of_device_get_match_data(dev);
	if (!ctx->pdata)
		return dev_err_probe(dev, -ENODEV,
				     "Failed to obtain platform data\n");

	ctx->regs = devm_kcalloc(dev, ctx->pdata->num_vregs,
				 sizeof(*ctx->regs), GFP_KERNEL);
	if (!ctx->regs)
		return -ENOMEM;

	for (i = 0; i < ctx->pdata->num_vregs; i++)
		ctx->regs[i].supply = ctx->pdata->vregs[i];

	ret = devm_regulator_bulk_get(dev, ctx->pdata->num_vregs, ctx->regs);
	if (ret < 0)
		return dev_err_probe(dev, ret,
				     "Failed to get all regulators\n");

	ctx->bt_gpio = devm_gpiod_get_optional(dev, "bt-enable", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->bt_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->bt_gpio),
				     "Failed to get the Bluetooth enable GPIO\n");

	ctx->wlan_gpio = devm_gpiod_get_optional(dev, "wlan-enable",
						 GPIOD_OUT_LOW);
	if (IS_ERR(ctx->wlan_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->wlan_gpio),
				     "Failed to get the WLAN enable GPIO\n");

	ctx->clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(ctx->clk))
		return dev_err_probe(dev, PTR_ERR(ctx->clk),
				     "Failed to get the reference clock\n");

	memset(&config, 0, sizeof(config));

	config.parent = dev;
	config.owner = THIS_MODULE;
	config.drvdata = ctx;
	config.match = pwrseq_qcom_wcn_match;
	config.targets = pwrseq_qcom_wcn_targets;

	ctx->pwrseq = devm_pwrseq_device_register(dev, &config);
	if (IS_ERR(ctx->pwrseq))
		return dev_err_probe(dev, PTR_ERR(ctx->pwrseq),
				     "Failed to register the power sequencer\n");

	return 0;
}

static const struct of_device_id pwrseq_qcom_wcn_of_match[] = {
	{
		.compatible = "qcom,qca6390-pmu",
		.data = &pwrseq_qca6390_of_data,
	},
	{
		.compatible = "qcom,wcn7850-pmu",
		.data = &pwrseq_wcn7850_of_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, pwrseq_qcom_wcn_of_match);

static struct platform_driver pwrseq_qcom_wcn_driver = {
	.driver = {
		.name = "pwrseq-qcom_wcn",
		.of_match_table = pwrseq_qcom_wcn_of_match,
	},
	.probe = pwrseq_qcom_wcn_probe,
};
module_platform_driver(pwrseq_qcom_wcn_driver);

MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_DESCRIPTION("Qualcomm WCN PMU power sequencing driver");
MODULE_LICENSE("GPL");
+56
include/linux/pwrseq/consumer.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2024 Linaro Ltd. 4 + */ 5 + 6 + #ifndef __POWER_SEQUENCING_CONSUMER_H__ 7 + #define __POWER_SEQUENCING_CONSUMER_H__ 8 + 9 + #include <linux/err.h> 10 + 11 + struct device; 12 + struct pwrseq_desc; 13 + 14 + #if IS_ENABLED(CONFIG_POWER_SEQUENCING) 15 + 16 + struct pwrseq_desc * __must_check 17 + pwrseq_get(struct device *dev, const char *target); 18 + void pwrseq_put(struct pwrseq_desc *desc); 19 + 20 + struct pwrseq_desc * __must_check 21 + devm_pwrseq_get(struct device *dev, const char *target); 22 + 23 + int pwrseq_power_on(struct pwrseq_desc *desc); 24 + int pwrseq_power_off(struct pwrseq_desc *desc); 25 + 26 + #else /* CONFIG_POWER_SEQUENCING */ 27 + 28 + static inline struct pwrseq_desc * __must_check 29 + pwrseq_get(struct device *dev, const char *target) 30 + { 31 + return ERR_PTR(-ENOSYS); 32 + } 33 + 34 + static inline void pwrseq_put(struct pwrseq_desc *desc) 35 + { 36 + } 37 + 38 + static inline struct pwrseq_desc * __must_check 39 + devm_pwrseq_get(struct device *dev, const char *target) 40 + { 41 + return ERR_PTR(-ENOSYS); 42 + } 43 + 44 + static inline int pwrseq_power_on(struct pwrseq_desc *desc) 45 + { 46 + return -ENOSYS; 47 + } 48 + 49 + static inline int pwrseq_power_off(struct pwrseq_desc *desc) 50 + { 51 + return -ENOSYS; 52 + } 53 + 54 + #endif /* CONFIG_POWER_SEQUENCING */ 55 + 56 + #endif /* __POWER_SEQUENCING_CONSUMER_H__ */
+75
include/linux/pwrseq/provider.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2024 Linaro Ltd. 4 + */ 5 + 6 + #ifndef __POWER_SEQUENCING_PROVIDER_H__ 7 + #define __POWER_SEQUENCING_PROVIDER_H__ 8 + 9 + struct device; 10 + struct module; 11 + struct pwrseq_device; 12 + 13 + typedef int (*pwrseq_power_state_func)(struct pwrseq_device *); 14 + typedef int (*pwrseq_match_func)(struct pwrseq_device *, struct device *); 15 + 16 + /** 17 + * struct pwrseq_unit_data - Configuration of a single power sequencing 18 + * unit. 19 + * @name: Name of the unit. 20 + * @deps: Units that must be enabled before this one and disabled after it 21 + * in the order they come in this array. Must be NULL-terminated. 22 + * @enable: Callback running the part of the power-on sequence provided by 23 + * this unit. 24 + * @disable: Callback running the part of the power-off sequence provided 25 + * by this unit. 26 + */ 27 + struct pwrseq_unit_data { 28 + const char *name; 29 + const struct pwrseq_unit_data **deps; 30 + pwrseq_power_state_func enable; 31 + pwrseq_power_state_func disable; 32 + }; 33 + 34 + /** 35 + * struct pwrseq_target_data - Configuration of a power sequencing target. 36 + * @name: Name of the target. 37 + * @unit: Final unit that this target must reach in order to be considered 38 + * enabled. 39 + * @post_enable: Callback run after the target unit has been enabled, *after* 40 + * the state lock has been released. It's useful for implementing 41 + * boot-up delays without blocking other users from powering up 42 + * using the same power sequencer. 43 + */ 44 + struct pwrseq_target_data { 45 + const char *name; 46 + const struct pwrseq_unit_data *unit; 47 + pwrseq_power_state_func post_enable; 48 + }; 49 + 50 + /** 51 + * struct pwrseq_config - Configuration used for registering a new provider. 52 + * @parent: Parent device for the sequencer. Must be set. 53 + * @owner: Module providing this device. 54 + * @drvdata: Private driver data. 
55 + * @match: Provider callback used to match the consumer device to the sequencer. 56 + * @targets: Array of targets for this power sequencer. Must be NULL-terminated. 57 + */ 58 + struct pwrseq_config { 59 + struct device *parent; 60 + struct module *owner; 61 + void *drvdata; 62 + pwrseq_match_func match; 63 + const struct pwrseq_target_data **targets; 64 + }; 65 + 66 + struct pwrseq_device * 67 + pwrseq_device_register(const struct pwrseq_config *config); 68 + void pwrseq_device_unregister(struct pwrseq_device *pwrseq); 69 + struct pwrseq_device * 70 + devm_pwrseq_device_register(struct device *dev, 71 + const struct pwrseq_config *config); 72 + 73 + void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq); 74 + 75 + #endif /* __POWER_SEQUENCING_PROVIDER_H__ */
+4
include/net/bluetooth/bluetooth.h
··· 441 441 typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status, 442 442 u16 opcode, struct sk_buff *skb); 443 443 444 + void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, 445 + hci_req_complete_t *req_complete, 446 + hci_req_complete_skb_t *req_complete_skb); 447 + 444 448 #define HCI_REQ_START BIT(0) 445 449 #define HCI_REQ_SKB BIT(1) 446 450
+1 -6
include/net/bluetooth/hci_core.h
··· 91 91 s8 rssi; 92 92 u16 uuid_count; 93 93 u8 (*uuids)[16]; 94 - unsigned long scan_start; 95 - unsigned long scan_duration; 96 94 unsigned long name_resolve_timeout; 97 95 }; 98 96 ··· 476 478 unsigned int iso_pkts; 477 479 478 480 unsigned long acl_last_tx; 479 - unsigned long sco_last_tx; 480 481 unsigned long le_last_tx; 481 482 482 483 __u8 le_tx_def_phys; ··· 527 530 528 531 struct discovery_state discovery; 529 532 530 - int discovery_old_state; 531 533 bool discovery_paused; 532 534 int advertising_old_state; 533 535 bool advertising_paused; ··· 645 649 int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, 646 650 struct bt_codec *codec, __u8 *vnd_len, 647 651 __u8 **vnd_data); 652 + u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb); 648 653 }; 649 654 650 655 #define HCI_PHY_HANDLE(handle) (handle & 0xff) ··· 887 890 hdev->discovery.uuid_count = 0; 888 891 kfree(hdev->discovery.uuids); 889 892 hdev->discovery.uuids = NULL; 890 - hdev->discovery.scan_start = 0; 891 - hdev->discovery.scan_duration = 0; 892 893 } 893 894 894 895 bool hci_discovery_active(struct hci_dev *hdev);
+1 -1
include/net/bluetooth/hci_sock.h
··· 144 144 145 145 struct hci_dev_list_req { 146 146 __u16 dev_num; 147 - struct hci_dev_req dev_req[]; /* hci_dev_req structures */ 147 + struct hci_dev_req dev_req[] __counted_by(dev_num); 148 148 }; 149 149 150 150 struct hci_conn_list_req {
+26
include/net/bluetooth/hci_sync.h
··· 8 8 #define UINT_PTR(_handle) ((void *)((uintptr_t)_handle)) 9 9 #define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr)) 10 10 11 + #define HCI_REQ_DONE 0 12 + #define HCI_REQ_PEND 1 13 + #define HCI_REQ_CANCELED 2 14 + 15 + #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) 16 + #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) 17 + 18 + struct hci_request { 19 + struct hci_dev *hdev; 20 + struct sk_buff_head cmd_q; 21 + 22 + /* If something goes wrong when building the HCI request, the error 23 + * value is stored in this field. 24 + */ 25 + int err; 26 + }; 27 + 11 28 typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data); 12 29 typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data, 13 30 int err); ··· 37 20 }; 38 21 39 22 struct adv_info; 23 + 24 + struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen, 25 + const void *param, struct sock *sk); 26 + 40 27 /* Function with sync suffix shall not be called with hdev->lock held as they 41 28 * wait the command to complete and in the meantime an event could be received 42 29 * which could attempt to acquire hdev->lock causing a deadlock. ··· 152 131 153 132 int hci_update_connectable_sync(struct hci_dev *hdev); 154 133 134 + int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp); 135 + 155 136 int hci_start_discovery_sync(struct hci_dev *hdev); 156 137 int hci_stop_discovery_sync(struct hci_dev *hdev); 157 138 ··· 161 138 int hci_resume_sync(struct hci_dev *hdev); 162 139 163 140 struct hci_conn; 141 + struct hci_conn_params; 164 142 165 143 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason); 166 144 ··· 180 156 int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn); 181 157 182 158 int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn); 159 + int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, 160 + struct hci_conn_params *params);
+1 -1
include/net/bluetooth/rfcomm.h
··· 355 355 356 356 struct rfcomm_dev_list_req { 357 357 u16 dev_num; 358 - struct rfcomm_dev_info dev_info[]; 358 + struct rfcomm_dev_info dev_info[] __counted_by(dev_num); 359 359 }; 360 360 361 361 int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+1 -2
net/bluetooth/Makefile
··· 14 14 15 15 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 16 16 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ 17 - ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ 18 - eir.o hci_sync.o 17 + ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o 19 18 20 19 bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o 21 20
-1
net/bluetooth/hci_conn.c
··· 34 34 #include <net/bluetooth/iso.h> 35 35 #include <net/bluetooth/mgmt.h> 36 36 37 - #include "hci_request.h" 38 37 #include "smp.h" 39 38 #include "eir.h" 40 39
+41 -54
net/bluetooth/hci_core.c
··· 40 40 #include <net/bluetooth/l2cap.h> 41 41 #include <net/bluetooth/mgmt.h> 42 42 43 - #include "hci_request.h" 44 43 #include "hci_debugfs.h" 45 44 #include "smp.h" 46 45 #include "leds.h" ··· 311 312 return copied; 312 313 } 313 314 314 - static int hci_inq_req(struct hci_request *req, unsigned long opt) 315 - { 316 - struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; 317 - struct hci_dev *hdev = req->hdev; 318 - struct hci_cp_inquiry cp; 319 - 320 - BT_DBG("%s", hdev->name); 321 - 322 - if (test_bit(HCI_INQUIRY, &hdev->flags)) 323 - return 0; 324 - 325 - /* Start Inquiry */ 326 - memcpy(&cp.lap, &ir->lap, 3); 327 - cp.length = ir->length; 328 - cp.num_rsp = ir->num_rsp; 329 - hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); 330 - 331 - return 0; 332 - } 333 - 334 315 int hci_inquiry(void __user *arg) 335 316 { 336 317 __u8 __user *ptr = arg; 337 318 struct hci_inquiry_req ir; 338 319 struct hci_dev *hdev; 339 320 int err = 0, do_inquiry = 0, max_rsp; 340 - long timeo; 341 321 __u8 *buf; 342 322 343 323 if (copy_from_user(&ir, ptr, sizeof(ir))) ··· 355 377 } 356 378 hci_dev_unlock(hdev); 357 379 358 - timeo = ir.length * msecs_to_jiffies(2000); 359 - 360 380 if (do_inquiry) { 361 - err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, 362 - timeo, NULL); 381 + hci_req_sync_lock(hdev); 382 + err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp); 383 + hci_req_sync_unlock(hdev); 384 + 363 385 if (err < 0) 364 386 goto done; 365 387 ··· 696 718 697 719 switch (cmd) { 698 720 case HCISETAUTH: 699 - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, 700 - 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 721 + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, 722 + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 701 723 break; 702 724 703 725 case HCISETENCRYPT: ··· 708 730 709 731 if (!test_bit(HCI_AUTH, &hdev->flags)) { 710 732 /* Auth must be enabled first */ 711 - err = __hci_cmd_sync_status(hdev, 712 - HCI_OP_WRITE_AUTH_ENABLE, 713 - 1, &dr.dev_opt, 714 - 
HCI_CMD_TIMEOUT); 733 + err = hci_cmd_sync_status(hdev, 734 + HCI_OP_WRITE_AUTH_ENABLE, 735 + 1, &dr.dev_opt, 736 + HCI_CMD_TIMEOUT); 715 737 if (err) 716 738 break; 717 739 } 718 740 719 - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 720 - 1, &dr.dev_opt, 721 - HCI_CMD_TIMEOUT); 741 + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 742 + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 722 743 break; 723 744 724 745 case HCISETSCAN: 725 - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, 726 - 1, &dr.dev_opt, 727 - HCI_CMD_TIMEOUT); 746 + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, 747 + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 728 748 729 749 /* Ensure that the connectable and discoverable states 730 750 * get correctly modified as this was a non-mgmt change. ··· 734 758 case HCISETLINKPOL: 735 759 policy = cpu_to_le16(dr.dev_opt); 736 760 737 - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 738 - 2, &policy, 739 - HCI_CMD_TIMEOUT); 761 + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 762 + 2, &policy, HCI_CMD_TIMEOUT); 740 763 break; 741 764 742 765 case HCISETLINKMODE: ··· 776 801 struct hci_dev *hdev; 777 802 struct hci_dev_list_req *dl; 778 803 struct hci_dev_req *dr; 779 - int n = 0, size, err; 804 + int n = 0, err; 780 805 __u16 dev_num; 781 806 782 807 if (get_user(dev_num, (__u16 __user *) arg)) ··· 785 810 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 786 811 return -EINVAL; 787 812 788 - size = sizeof(*dl) + dev_num * sizeof(*dr); 789 - 790 - dl = kzalloc(size, GFP_KERNEL); 813 + dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL); 791 814 if (!dl) 792 815 return -ENOMEM; 793 816 817 + dl->dev_num = dev_num; 794 818 dr = dl->dev_req; 795 819 796 820 read_lock(&hci_dev_list_lock); ··· 803 829 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 804 830 flags &= ~BIT(HCI_UP); 805 831 806 - (dr + n)->dev_id = hdev->id; 807 - (dr + n)->dev_opt = flags; 832 + dr[n].dev_id = hdev->id; 833 + 
dr[n].dev_opt = flags; 808 834 809 835 if (++n >= dev_num) 810 836 break; ··· 812 838 read_unlock(&hci_dev_list_lock); 813 839 814 840 dl->dev_num = n; 815 - size = sizeof(*dl) + n * sizeof(*dr); 816 - 817 - err = copy_to_user(arg, dl, size); 841 + err = copy_to_user(arg, dl, struct_size(dl, dev_req, n)); 818 842 kfree(dl); 819 843 820 844 return err ? -EFAULT : 0; ··· 2551 2579 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); 2552 2580 2553 2581 hci_devcd_setup(hdev); 2554 - hci_request_setup(hdev); 2555 2582 2556 2583 hci_init_sysfs(hdev); 2557 2584 discovery_init(hdev); ··· 2883 2912 } 2884 2913 EXPORT_SYMBOL(hci_reset_dev); 2885 2914 2915 + static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) 2916 + { 2917 + if (hdev->classify_pkt_type) 2918 + return hdev->classify_pkt_type(hdev, skb); 2919 + 2920 + return hci_skb_pkt_type(skb); 2921 + } 2922 + 2886 2923 /* Receive frame from HCI drivers */ 2887 2924 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) 2888 2925 { 2926 + u8 dev_pkt_type; 2927 + 2889 2928 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 2890 2929 && !test_bit(HCI_INIT, &hdev->flags))) { 2891 2930 kfree_skb(skb); 2892 2931 return -ENXIO; 2932 + } 2933 + 2934 + /* Check if the driver agree with packet type classification */ 2935 + dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb); 2936 + if (hci_skb_pkt_type(skb) != dev_pkt_type) { 2937 + hci_skb_pkt_type(skb) = dev_pkt_type; 2893 2938 } 2894 2939 2895 2940 switch (hci_skb_pkt_type(skb)) { ··· 3052 3065 3053 3066 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); 3054 3067 3055 - skb = hci_prepare_cmd(hdev, opcode, plen, param); 3068 + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); 3056 3069 if (!skb) { 3057 3070 bt_dev_err(hdev, "no memory for command"); 3058 3071 return -ENOMEM; ··· 3087 3100 return -EINVAL; 3088 3101 } 3089 3102 3090 - skb = hci_prepare_cmd(hdev, opcode, plen, param); 3103 + skb = hci_cmd_sync_alloc(hdev, opcode, 
plen, param, NULL); 3091 3104 if (!skb) { 3092 3105 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 3093 3106 opcode); ··· 4072 4085 return; 4073 4086 } 4074 4087 4075 - if (hci_req_status_pend(hdev) && 4088 + if (hdev->req_status == HCI_REQ_PEND && 4076 4089 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) { 4077 4090 kfree_skb(hdev->req_skb); 4078 4091 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
-1
net/bluetooth/hci_debugfs.c
··· 28 28 #include <net/bluetooth/hci_core.h> 29 29 30 30 #include "smp.h" 31 - #include "hci_request.h" 32 31 #include "hci_debugfs.h" 33 32 34 33 #define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \
+2 -1
net/bluetooth/hci_event.c
··· 33 33 #include <net/bluetooth/hci_core.h> 34 34 #include <net/bluetooth/mgmt.h> 35 35 36 - #include "hci_request.h" 37 36 #include "hci_debugfs.h" 38 37 #include "hci_codec.h" 39 38 #include "smp.h" ··· 6986 6987 6987 6988 if (!pa_sync) 6988 6989 goto unlock; 6990 + 6991 + pa_sync->iso_qos.bcast.encryption = ev->encryption; 6989 6992 6990 6993 /* Notify iso layer */ 6991 6994 hci_connect_cfm(pa_sync, 0);
-903
net/bluetooth/hci_request.c
··· 1 - /* 2 - BlueZ - Bluetooth protocol stack for Linux 3 - 4 - Copyright (C) 2014 Intel Corporation 5 - 6 - This program is free software; you can redistribute it and/or modify 7 - it under the terms of the GNU General Public License version 2 as 8 - published by the Free Software Foundation; 9 - 10 - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 11 - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 12 - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 13 - IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 14 - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 15 - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 - 19 - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 20 - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 21 - SOFTWARE IS DISCLAIMED. 
22 - */ 23 - 24 - #include <linux/sched/signal.h> 25 - 26 - #include <net/bluetooth/bluetooth.h> 27 - #include <net/bluetooth/hci_core.h> 28 - #include <net/bluetooth/mgmt.h> 29 - 30 - #include "smp.h" 31 - #include "hci_request.h" 32 - #include "msft.h" 33 - #include "eir.h" 34 - 35 - void hci_req_init(struct hci_request *req, struct hci_dev *hdev) 36 - { 37 - skb_queue_head_init(&req->cmd_q); 38 - req->hdev = hdev; 39 - req->err = 0; 40 - } 41 - 42 - void hci_req_purge(struct hci_request *req) 43 - { 44 - skb_queue_purge(&req->cmd_q); 45 - } 46 - 47 - bool hci_req_status_pend(struct hci_dev *hdev) 48 - { 49 - return hdev->req_status == HCI_REQ_PEND; 50 - } 51 - 52 - static int req_run(struct hci_request *req, hci_req_complete_t complete, 53 - hci_req_complete_skb_t complete_skb) 54 - { 55 - struct hci_dev *hdev = req->hdev; 56 - struct sk_buff *skb; 57 - unsigned long flags; 58 - 59 - bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); 60 - 61 - /* If an error occurred during request building, remove all HCI 62 - * commands queued on the HCI request queue. 
63 - */ 64 - if (req->err) { 65 - skb_queue_purge(&req->cmd_q); 66 - return req->err; 67 - } 68 - 69 - /* Do not allow empty requests */ 70 - if (skb_queue_empty(&req->cmd_q)) 71 - return -ENODATA; 72 - 73 - skb = skb_peek_tail(&req->cmd_q); 74 - if (complete) { 75 - bt_cb(skb)->hci.req_complete = complete; 76 - } else if (complete_skb) { 77 - bt_cb(skb)->hci.req_complete_skb = complete_skb; 78 - bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; 79 - } 80 - 81 - spin_lock_irqsave(&hdev->cmd_q.lock, flags); 82 - skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); 83 - spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 84 - 85 - queue_work(hdev->workqueue, &hdev->cmd_work); 86 - 87 - return 0; 88 - } 89 - 90 - int hci_req_run(struct hci_request *req, hci_req_complete_t complete) 91 - { 92 - return req_run(req, complete, NULL); 93 - } 94 - 95 - int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) 96 - { 97 - return req_run(req, NULL, complete); 98 - } 99 - 100 - void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, 101 - struct sk_buff *skb) 102 - { 103 - bt_dev_dbg(hdev, "result 0x%2.2x", result); 104 - 105 - if (hdev->req_status == HCI_REQ_PEND) { 106 - hdev->req_result = result; 107 - hdev->req_status = HCI_REQ_DONE; 108 - if (skb) { 109 - kfree_skb(hdev->req_skb); 110 - hdev->req_skb = skb_get(skb); 111 - } 112 - wake_up_interruptible(&hdev->req_wait_q); 113 - } 114 - } 115 - 116 - /* Execute request and wait for completion. 
*/ 117 - int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, 118 - unsigned long opt), 119 - unsigned long opt, u32 timeout, u8 *hci_status) 120 - { 121 - struct hci_request req; 122 - int err = 0; 123 - 124 - bt_dev_dbg(hdev, "start"); 125 - 126 - hci_req_init(&req, hdev); 127 - 128 - hdev->req_status = HCI_REQ_PEND; 129 - 130 - err = func(&req, opt); 131 - if (err) { 132 - if (hci_status) 133 - *hci_status = HCI_ERROR_UNSPECIFIED; 134 - return err; 135 - } 136 - 137 - err = hci_req_run_skb(&req, hci_req_sync_complete); 138 - if (err < 0) { 139 - hdev->req_status = 0; 140 - 141 - /* ENODATA means the HCI request command queue is empty. 142 - * This can happen when a request with conditionals doesn't 143 - * trigger any commands to be sent. This is normal behavior 144 - * and should not trigger an error return. 145 - */ 146 - if (err == -ENODATA) { 147 - if (hci_status) 148 - *hci_status = 0; 149 - return 0; 150 - } 151 - 152 - if (hci_status) 153 - *hci_status = HCI_ERROR_UNSPECIFIED; 154 - 155 - return err; 156 - } 157 - 158 - err = wait_event_interruptible_timeout(hdev->req_wait_q, 159 - hdev->req_status != HCI_REQ_PEND, timeout); 160 - 161 - if (err == -ERESTARTSYS) 162 - return -EINTR; 163 - 164 - switch (hdev->req_status) { 165 - case HCI_REQ_DONE: 166 - err = -bt_to_errno(hdev->req_result); 167 - if (hci_status) 168 - *hci_status = hdev->req_result; 169 - break; 170 - 171 - case HCI_REQ_CANCELED: 172 - err = -hdev->req_result; 173 - if (hci_status) 174 - *hci_status = HCI_ERROR_UNSPECIFIED; 175 - break; 176 - 177 - default: 178 - err = -ETIMEDOUT; 179 - if (hci_status) 180 - *hci_status = HCI_ERROR_UNSPECIFIED; 181 - break; 182 - } 183 - 184 - kfree_skb(hdev->req_skb); 185 - hdev->req_skb = NULL; 186 - hdev->req_status = hdev->req_result = 0; 187 - 188 - bt_dev_dbg(hdev, "end: err %d", err); 189 - 190 - return err; 191 - } 192 - 193 - int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, 194 - unsigned long 
opt), 195 - unsigned long opt, u32 timeout, u8 *hci_status) 196 - { 197 - int ret; 198 - 199 - /* Serialize all requests */ 200 - hci_req_sync_lock(hdev); 201 - /* check the state after obtaing the lock to protect the HCI_UP 202 - * against any races from hci_dev_do_close when the controller 203 - * gets removed. 204 - */ 205 - if (test_bit(HCI_UP, &hdev->flags)) 206 - ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); 207 - else 208 - ret = -ENETDOWN; 209 - hci_req_sync_unlock(hdev); 210 - 211 - return ret; 212 - } 213 - 214 - struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, 215 - const void *param) 216 - { 217 - int len = HCI_COMMAND_HDR_SIZE + plen; 218 - struct hci_command_hdr *hdr; 219 - struct sk_buff *skb; 220 - 221 - skb = bt_skb_alloc(len, GFP_ATOMIC); 222 - if (!skb) 223 - return NULL; 224 - 225 - hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); 226 - hdr->opcode = cpu_to_le16(opcode); 227 - hdr->plen = plen; 228 - 229 - if (plen) 230 - skb_put_data(skb, param, plen); 231 - 232 - bt_dev_dbg(hdev, "skb len %d", skb->len); 233 - 234 - hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; 235 - hci_skb_opcode(skb) = opcode; 236 - 237 - return skb; 238 - } 239 - 240 - /* Queue a command to an asynchronous HCI request */ 241 - void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, 242 - const void *param, u8 event) 243 - { 244 - struct hci_dev *hdev = req->hdev; 245 - struct sk_buff *skb; 246 - 247 - bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); 248 - 249 - /* If an error occurred during request building, there is no point in 250 - * queueing the HCI command. We can simply return. 
251 - */ 252 - if (req->err) 253 - return; 254 - 255 - skb = hci_prepare_cmd(hdev, opcode, plen, param); 256 - if (!skb) { 257 - bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 258 - opcode); 259 - req->err = -ENOMEM; 260 - return; 261 - } 262 - 263 - if (skb_queue_empty(&req->cmd_q)) 264 - bt_cb(skb)->hci.req_flags |= HCI_REQ_START; 265 - 266 - hci_skb_event(skb) = event; 267 - 268 - skb_queue_tail(&req->cmd_q, skb); 269 - } 270 - 271 - void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, 272 - const void *param) 273 - { 274 - bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode); 275 - hci_req_add_ev(req, opcode, plen, param, 0); 276 - } 277 - 278 - static void start_interleave_scan(struct hci_dev *hdev) 279 - { 280 - hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; 281 - queue_delayed_work(hdev->req_workqueue, 282 - &hdev->interleave_scan, 0); 283 - } 284 - 285 - static bool is_interleave_scanning(struct hci_dev *hdev) 286 - { 287 - return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; 288 - } 289 - 290 - static void cancel_interleave_scan(struct hci_dev *hdev) 291 - { 292 - bt_dev_dbg(hdev, "cancelling interleave scan"); 293 - 294 - cancel_delayed_work_sync(&hdev->interleave_scan); 295 - 296 - hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; 297 - } 298 - 299 - /* Return true if interleave_scan wasn't started until exiting this function, 300 - * otherwise, return false 301 - */ 302 - static bool __hci_update_interleaved_scan(struct hci_dev *hdev) 303 - { 304 - /* Do interleaved scan only if all of the following are true: 305 - * - There is at least one ADV monitor 306 - * - At least one pending LE connection or one device to be scanned for 307 - * - Monitor offloading is not supported 308 - * If so, we should alternate between allowlist scan and one without 309 - * any filters to save power. 
310 - */ 311 - bool use_interleaving = hci_is_adv_monitoring(hdev) && 312 - !(list_empty(&hdev->pend_le_conns) && 313 - list_empty(&hdev->pend_le_reports)) && 314 - hci_get_adv_monitor_offload_ext(hdev) == 315 - HCI_ADV_MONITOR_EXT_NONE; 316 - bool is_interleaving = is_interleave_scanning(hdev); 317 - 318 - if (use_interleaving && !is_interleaving) { 319 - start_interleave_scan(hdev); 320 - bt_dev_dbg(hdev, "starting interleave scan"); 321 - return true; 322 - } 323 - 324 - if (!use_interleaving && is_interleaving) 325 - cancel_interleave_scan(hdev); 326 - 327 - return false; 328 - } 329 - 330 - void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) 331 - { 332 - struct hci_dev *hdev = req->hdev; 333 - 334 - if (hdev->scanning_paused) { 335 - bt_dev_dbg(hdev, "Scanning is paused for suspend"); 336 - return; 337 - } 338 - 339 - if (use_ext_scan(hdev)) { 340 - struct hci_cp_le_set_ext_scan_enable cp; 341 - 342 - memset(&cp, 0, sizeof(cp)); 343 - cp.enable = LE_SCAN_DISABLE; 344 - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), 345 - &cp); 346 - } else { 347 - struct hci_cp_le_set_scan_enable cp; 348 - 349 - memset(&cp, 0, sizeof(cp)); 350 - cp.enable = LE_SCAN_DISABLE; 351 - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 352 - } 353 - 354 - /* Disable address resolution */ 355 - if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { 356 - __u8 enable = 0x00; 357 - 358 - hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); 359 - } 360 - } 361 - 362 - static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr, 363 - u8 bdaddr_type) 364 - { 365 - struct hci_cp_le_del_from_accept_list cp; 366 - 367 - cp.bdaddr_type = bdaddr_type; 368 - bacpy(&cp.bdaddr, bdaddr); 369 - 370 - bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr, 371 - cp.bdaddr_type); 372 - hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); 373 - 374 - if (use_ll_privacy(req->hdev)) 
{ 375 - struct smp_irk *irk; 376 - 377 - irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); 378 - if (irk) { 379 - struct hci_cp_le_del_from_resolv_list cp; 380 - 381 - cp.bdaddr_type = bdaddr_type; 382 - bacpy(&cp.bdaddr, bdaddr); 383 - 384 - hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST, 385 - sizeof(cp), &cp); 386 - } 387 - } 388 - } 389 - 390 - /* Adds connection to accept list if needed. On error, returns -1. */ 391 - static int add_to_accept_list(struct hci_request *req, 392 - struct hci_conn_params *params, u8 *num_entries, 393 - bool allow_rpa) 394 - { 395 - struct hci_cp_le_add_to_accept_list cp; 396 - struct hci_dev *hdev = req->hdev; 397 - 398 - /* Already in accept list */ 399 - if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, 400 - params->addr_type)) 401 - return 0; 402 - 403 - /* Select filter policy to accept all advertising */ 404 - if (*num_entries >= hdev->le_accept_list_size) 405 - return -1; 406 - 407 - /* Accept list can not be used with RPAs */ 408 - if (!allow_rpa && 409 - !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && 410 - hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) { 411 - return -1; 412 - } 413 - 414 - /* During suspend, only wakeable devices can be in accept list */ 415 - if (hdev->suspended && 416 - !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) 417 - return 0; 418 - 419 - *num_entries += 1; 420 - cp.bdaddr_type = params->addr_type; 421 - bacpy(&cp.bdaddr, &params->addr); 422 - 423 - bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr, 424 - cp.bdaddr_type); 425 - hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); 426 - 427 - if (use_ll_privacy(hdev)) { 428 - struct smp_irk *irk; 429 - 430 - irk = hci_find_irk_by_addr(hdev, &params->addr, 431 - params->addr_type); 432 - if (irk) { 433 - struct hci_cp_le_add_to_resolv_list cp; 434 - 435 - cp.bdaddr_type = params->addr_type; 436 - bacpy(&cp.bdaddr, &params->addr); 437 - memcpy(cp.peer_irk, irk->val, 16); 438 - 439 
- if (hci_dev_test_flag(hdev, HCI_PRIVACY)) 440 - memcpy(cp.local_irk, hdev->irk, 16); 441 - else 442 - memset(cp.local_irk, 0, 16); 443 - 444 - hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST, 445 - sizeof(cp), &cp); 446 - } 447 - } 448 - 449 - return 0; 450 - } 451 - 452 - static u8 update_accept_list(struct hci_request *req) 453 - { 454 - struct hci_dev *hdev = req->hdev; 455 - struct hci_conn_params *params; 456 - struct bdaddr_list *b; 457 - u8 num_entries = 0; 458 - bool pend_conn, pend_report; 459 - /* We allow usage of accept list even with RPAs in suspend. In the worst 460 - * case, we won't be able to wake from devices that use the privacy1.2 461 - * features. Additionally, once we support privacy1.2 and IRK 462 - * offloading, we can update this to also check for those conditions. 463 - */ 464 - bool allow_rpa = hdev->suspended; 465 - 466 - if (use_ll_privacy(hdev)) 467 - allow_rpa = true; 468 - 469 - /* Go through the current accept list programmed into the 470 - * controller one by one and check if that address is still 471 - * in the list of pending connections or list of devices to 472 - * report. If not present in either list, then queue the 473 - * command to remove it from the controller. 474 - */ 475 - list_for_each_entry(b, &hdev->le_accept_list, list) { 476 - pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, 477 - &b->bdaddr, 478 - b->bdaddr_type); 479 - pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, 480 - &b->bdaddr, 481 - b->bdaddr_type); 482 - 483 - /* If the device is not likely to connect or report, 484 - * remove it from the accept list. 
485 - */ 486 - if (!pend_conn && !pend_report) { 487 - del_from_accept_list(req, &b->bdaddr, b->bdaddr_type); 488 - continue; 489 - } 490 - 491 - /* Accept list can not be used with RPAs */ 492 - if (!allow_rpa && 493 - !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && 494 - hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { 495 - return 0x00; 496 - } 497 - 498 - num_entries++; 499 - } 500 - 501 - /* Since all no longer valid accept list entries have been 502 - * removed, walk through the list of pending connections 503 - * and ensure that any new device gets programmed into 504 - * the controller. 505 - * 506 - * If the list of the devices is larger than the list of 507 - * available accept list entries in the controller, then 508 - * just abort and return filer policy value to not use the 509 - * accept list. 510 - */ 511 - list_for_each_entry(params, &hdev->pend_le_conns, action) { 512 - if (add_to_accept_list(req, params, &num_entries, allow_rpa)) 513 - return 0x00; 514 - } 515 - 516 - /* After adding all new pending connections, walk through 517 - * the list of pending reports and also add these to the 518 - * accept list if there is still space. Abort if space runs out. 
519 - */ 520 - list_for_each_entry(params, &hdev->pend_le_reports, action) { 521 - if (add_to_accept_list(req, params, &num_entries, allow_rpa)) 522 - return 0x00; 523 - } 524 - 525 - /* Use the allowlist unless the following conditions are all true: 526 - * - We are not currently suspending 527 - * - There are 1 or more ADV monitors registered and it's not offloaded 528 - * - Interleaved scanning is not currently using the allowlist 529 - */ 530 - if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && 531 - hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && 532 - hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) 533 - return 0x00; 534 - 535 - /* Select filter policy to use accept list */ 536 - return 0x01; 537 - } 538 - 539 - static bool scan_use_rpa(struct hci_dev *hdev) 540 - { 541 - return hci_dev_test_flag(hdev, HCI_PRIVACY); 542 - } 543 - 544 - static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, 545 - u16 window, u8 own_addr_type, u8 filter_policy, 546 - bool filter_dup, bool addr_resolv) 547 - { 548 - struct hci_dev *hdev = req->hdev; 549 - 550 - if (hdev->scanning_paused) { 551 - bt_dev_dbg(hdev, "Scanning is paused for suspend"); 552 - return; 553 - } 554 - 555 - if (use_ll_privacy(hdev) && addr_resolv) { 556 - u8 enable = 0x01; 557 - 558 - hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); 559 - } 560 - 561 - /* Use ext scanning if set ext scan param and ext scan enable is 562 - * supported 563 - */ 564 - if (use_ext_scan(hdev)) { 565 - struct hci_cp_le_set_ext_scan_params *ext_param_cp; 566 - struct hci_cp_le_set_ext_scan_enable ext_enable_cp; 567 - struct hci_cp_le_scan_phy_params *phy_params; 568 - u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2]; 569 - u32 plen; 570 - 571 - ext_param_cp = (void *)data; 572 - phy_params = (void *)ext_param_cp->data; 573 - 574 - memset(ext_param_cp, 0, sizeof(*ext_param_cp)); 575 - ext_param_cp->own_addr_type = own_addr_type; 576 - 
ext_param_cp->filter_policy = filter_policy; 577 - 578 - plen = sizeof(*ext_param_cp); 579 - 580 - if (scan_1m(hdev) || scan_2m(hdev)) { 581 - ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M; 582 - 583 - memset(phy_params, 0, sizeof(*phy_params)); 584 - phy_params->type = type; 585 - phy_params->interval = cpu_to_le16(interval); 586 - phy_params->window = cpu_to_le16(window); 587 - 588 - plen += sizeof(*phy_params); 589 - phy_params++; 590 - } 591 - 592 - if (scan_coded(hdev)) { 593 - ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; 594 - 595 - memset(phy_params, 0, sizeof(*phy_params)); 596 - phy_params->type = type; 597 - phy_params->interval = cpu_to_le16(interval); 598 - phy_params->window = cpu_to_le16(window); 599 - 600 - plen += sizeof(*phy_params); 601 - phy_params++; 602 - } 603 - 604 - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, 605 - plen, ext_param_cp); 606 - 607 - memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); 608 - ext_enable_cp.enable = LE_SCAN_ENABLE; 609 - ext_enable_cp.filter_dup = filter_dup; 610 - 611 - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, 612 - sizeof(ext_enable_cp), &ext_enable_cp); 613 - } else { 614 - struct hci_cp_le_set_scan_param param_cp; 615 - struct hci_cp_le_set_scan_enable enable_cp; 616 - 617 - memset(&param_cp, 0, sizeof(param_cp)); 618 - param_cp.type = type; 619 - param_cp.interval = cpu_to_le16(interval); 620 - param_cp.window = cpu_to_le16(window); 621 - param_cp.own_address_type = own_addr_type; 622 - param_cp.filter_policy = filter_policy; 623 - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 624 - &param_cp); 625 - 626 - memset(&enable_cp, 0, sizeof(enable_cp)); 627 - enable_cp.enable = LE_SCAN_ENABLE; 628 - enable_cp.filter_dup = filter_dup; 629 - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), 630 - &enable_cp); 631 - } 632 - } 633 - 634 - static void set_random_addr(struct hci_request *req, bdaddr_t *rpa); 635 - static int hci_update_random_address(struct hci_request *req, 636 
- bool require_privacy, bool use_rpa, 637 - u8 *own_addr_type) 638 - { 639 - struct hci_dev *hdev = req->hdev; 640 - int err; 641 - 642 - /* If privacy is enabled use a resolvable private address. If 643 - * current RPA has expired or there is something else than 644 - * the current RPA in use, then generate a new one. 645 - */ 646 - if (use_rpa) { 647 - /* If Controller supports LL Privacy use own address type is 648 - * 0x03 649 - */ 650 - if (use_ll_privacy(hdev)) 651 - *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; 652 - else 653 - *own_addr_type = ADDR_LE_DEV_RANDOM; 654 - 655 - if (rpa_valid(hdev)) 656 - return 0; 657 - 658 - err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); 659 - if (err < 0) { 660 - bt_dev_err(hdev, "failed to generate new RPA"); 661 - return err; 662 - } 663 - 664 - set_random_addr(req, &hdev->rpa); 665 - 666 - return 0; 667 - } 668 - 669 - /* In case of required privacy without resolvable private address, 670 - * use an non-resolvable private address. This is useful for active 671 - * scanning and non-connectable advertising. 672 - */ 673 - if (require_privacy) { 674 - bdaddr_t nrpa; 675 - 676 - while (true) { 677 - /* The non-resolvable private address is generated 678 - * from random six bytes with the two most significant 679 - * bits cleared. 680 - */ 681 - get_random_bytes(&nrpa, 6); 682 - nrpa.b[5] &= 0x3f; 683 - 684 - /* The non-resolvable private address shall not be 685 - * equal to the public address. 686 - */ 687 - if (bacmp(&hdev->bdaddr, &nrpa)) 688 - break; 689 - } 690 - 691 - *own_addr_type = ADDR_LE_DEV_RANDOM; 692 - set_random_addr(req, &nrpa); 693 - return 0; 694 - } 695 - 696 - /* If forcing static address is in use or there is no public 697 - * address use the static address as random address (but skip 698 - * the HCI command if the current random address is already the 699 - * static one. 
700 - * 701 - * In case BR/EDR has been disabled on a dual-mode controller 702 - * and a static address has been configured, then use that 703 - * address instead of the public BR/EDR address. 704 - */ 705 - if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 706 - !bacmp(&hdev->bdaddr, BDADDR_ANY) || 707 - (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 708 - bacmp(&hdev->static_addr, BDADDR_ANY))) { 709 - *own_addr_type = ADDR_LE_DEV_RANDOM; 710 - if (bacmp(&hdev->static_addr, &hdev->random_addr)) 711 - hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, 712 - &hdev->static_addr); 713 - return 0; 714 - } 715 - 716 - /* Neither privacy nor static address is being used so use a 717 - * public address. 718 - */ 719 - *own_addr_type = ADDR_LE_DEV_PUBLIC; 720 - 721 - return 0; 722 - } 723 - 724 - /* Ensure to call hci_req_add_le_scan_disable() first to disable the 725 - * controller based address resolution to be able to reconfigure 726 - * resolving list. 727 - */ 728 - void hci_req_add_le_passive_scan(struct hci_request *req) 729 - { 730 - struct hci_dev *hdev = req->hdev; 731 - u8 own_addr_type; 732 - u8 filter_policy; 733 - u16 window, interval; 734 - /* Default is to enable duplicates filter */ 735 - u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 736 - /* Background scanning should run with address resolution */ 737 - bool addr_resolv = true; 738 - 739 - if (hdev->scanning_paused) { 740 - bt_dev_dbg(hdev, "Scanning is paused for suspend"); 741 - return; 742 - } 743 - 744 - /* Set require_privacy to false since no SCAN_REQ are send 745 - * during passive scanning. Not using an non-resolvable address 746 - * here is important so that peer devices using direct 747 - * advertising with our address will be correctly reported 748 - * by the controller. 
749 - */ 750 - if (hci_update_random_address(req, false, scan_use_rpa(hdev), 751 - &own_addr_type)) 752 - return; 753 - 754 - if (hdev->enable_advmon_interleave_scan && 755 - __hci_update_interleaved_scan(hdev)) 756 - return; 757 - 758 - bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); 759 - /* Adding or removing entries from the accept list must 760 - * happen before enabling scanning. The controller does 761 - * not allow accept list modification while scanning. 762 - */ 763 - filter_policy = update_accept_list(req); 764 - 765 - /* When the controller is using random resolvable addresses and 766 - * with that having LE privacy enabled, then controllers with 767 - * Extended Scanner Filter Policies support can now enable support 768 - * for handling directed advertising. 769 - * 770 - * So instead of using filter polices 0x00 (no accept list) 771 - * and 0x01 (accept list enabled) use the new filter policies 772 - * 0x02 (no accept list) and 0x03 (accept list enabled). 773 - */ 774 - if (hci_dev_test_flag(hdev, HCI_PRIVACY) && 775 - (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) 776 - filter_policy |= 0x02; 777 - 778 - if (hdev->suspended) { 779 - window = hdev->le_scan_window_suspend; 780 - interval = hdev->le_scan_int_suspend; 781 - } else if (hci_is_le_conn_scanning(hdev)) { 782 - window = hdev->le_scan_window_connect; 783 - interval = hdev->le_scan_int_connect; 784 - } else if (hci_is_adv_monitoring(hdev)) { 785 - window = hdev->le_scan_window_adv_monitor; 786 - interval = hdev->le_scan_int_adv_monitor; 787 - 788 - /* Disable duplicates filter when scanning for advertisement 789 - * monitor for the following reasons. 790 - * 791 - * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm 792 - * controllers ignore RSSI_Sampling_Period when the duplicates 793 - * filter is enabled. 
794 - * 795 - * For SW pattern filtering, when we're not doing interleaved 796 - * scanning, it is necessary to disable duplicates filter, 797 - * otherwise hosts can only receive one advertisement and it's 798 - * impossible to know if a peer is still in range. 799 - */ 800 - filter_dup = LE_SCAN_FILTER_DUP_DISABLE; 801 - } else { 802 - window = hdev->le_scan_window; 803 - interval = hdev->le_scan_interval; 804 - } 805 - 806 - bt_dev_dbg(hdev, "LE passive scan with accept list = %d", 807 - filter_policy); 808 - hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, 809 - own_addr_type, filter_policy, filter_dup, 810 - addr_resolv); 811 - } 812 - 813 - static int hci_req_add_le_interleaved_scan(struct hci_request *req, 814 - unsigned long opt) 815 - { 816 - struct hci_dev *hdev = req->hdev; 817 - int ret = 0; 818 - 819 - hci_dev_lock(hdev); 820 - 821 - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) 822 - hci_req_add_le_scan_disable(req, false); 823 - hci_req_add_le_passive_scan(req); 824 - 825 - switch (hdev->interleave_scan_state) { 826 - case INTERLEAVE_SCAN_ALLOWLIST: 827 - bt_dev_dbg(hdev, "next state: allowlist"); 828 - hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; 829 - break; 830 - case INTERLEAVE_SCAN_NO_FILTER: 831 - bt_dev_dbg(hdev, "next state: no filter"); 832 - hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; 833 - break; 834 - case INTERLEAVE_SCAN_NONE: 835 - BT_ERR("unexpected error"); 836 - ret = -1; 837 - } 838 - 839 - hci_dev_unlock(hdev); 840 - 841 - return ret; 842 - } 843 - 844 - static void interleave_scan_work(struct work_struct *work) 845 - { 846 - struct hci_dev *hdev = container_of(work, struct hci_dev, 847 - interleave_scan.work); 848 - u8 status; 849 - unsigned long timeout; 850 - 851 - if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { 852 - timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); 853 - } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { 854 - timeout = 
msecs_to_jiffies(hdev->advmon_no_filter_duration); 855 - } else { 856 - bt_dev_err(hdev, "unexpected error"); 857 - return; 858 - } 859 - 860 - hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, 861 - HCI_CMD_TIMEOUT, &status); 862 - 863 - /* Don't continue interleaving if it was canceled */ 864 - if (is_interleave_scanning(hdev)) 865 - queue_delayed_work(hdev->req_workqueue, 866 - &hdev->interleave_scan, timeout); 867 - } 868 - 869 - static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) 870 - { 871 - struct hci_dev *hdev = req->hdev; 872 - 873 - /* If we're advertising or initiating an LE connection we can't 874 - * go ahead and change the random address at this time. This is 875 - * because the eventual initiator address used for the 876 - * subsequently created connection will be undefined (some 877 - * controllers use the new address and others the one we had 878 - * when the operation started). 879 - * 880 - * In this kind of scenario skip the update and let the random 881 - * address be updated at the next cycle. 882 - */ 883 - if (hci_dev_test_flag(hdev, HCI_LE_ADV) || 884 - hci_lookup_le_connect(hdev)) { 885 - bt_dev_dbg(hdev, "Deferring random address update"); 886 - hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 887 - return; 888 - } 889 - 890 - hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); 891 - } 892 - 893 - void hci_request_setup(struct hci_dev *hdev) 894 - { 895 - INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); 896 - } 897 - 898 - void hci_request_cancel_all(struct hci_dev *hdev) 899 - { 900 - hci_cmd_sync_cancel_sync(hdev, ENODEV); 901 - 902 - cancel_interleave_scan(hdev); 903 - }
-71
net/bluetooth/hci_request.h
··· 1 - /* 2 - BlueZ - Bluetooth protocol stack for Linux 3 - Copyright (C) 2014 Intel Corporation 4 - 5 - This program is free software; you can redistribute it and/or modify 6 - it under the terms of the GNU General Public License version 2 as 7 - published by the Free Software Foundation; 8 - 9 - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 10 - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 11 - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 12 - IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 13 - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 14 - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 - 18 - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 19 - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 20 - SOFTWARE IS DISCLAIMED. 21 - */ 22 - 23 - #include <asm/unaligned.h> 24 - 25 - #define HCI_REQ_DONE 0 26 - #define HCI_REQ_PEND 1 27 - #define HCI_REQ_CANCELED 2 28 - 29 - #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) 30 - #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) 31 - 32 - struct hci_request { 33 - struct hci_dev *hdev; 34 - struct sk_buff_head cmd_q; 35 - 36 - /* If something goes wrong when building the HCI request, the error 37 - * value is stored in this field. 
38 - */ 39 - int err; 40 - }; 41 - 42 - void hci_req_init(struct hci_request *req, struct hci_dev *hdev); 43 - void hci_req_purge(struct hci_request *req); 44 - bool hci_req_status_pend(struct hci_dev *hdev); 45 - int hci_req_run(struct hci_request *req, hci_req_complete_t complete); 46 - int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); 47 - void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, 48 - struct sk_buff *skb); 49 - void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, 50 - const void *param); 51 - void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, 52 - const void *param, u8 event); 53 - void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, 54 - hci_req_complete_t *req_complete, 55 - hci_req_complete_skb_t *req_complete_skb); 56 - 57 - int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, 58 - unsigned long opt), 59 - unsigned long opt, u32 timeout, u8 *hci_status); 60 - int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, 61 - unsigned long opt), 62 - unsigned long opt, u32 timeout, u8 *hci_status); 63 - 64 - struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, 65 - const void *param); 66 - 67 - void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn); 68 - void hci_req_add_le_passive_scan(struct hci_request *req); 69 - 70 - void hci_request_setup(struct hci_dev *hdev); 71 - void hci_request_cancel_all(struct hci_dev *hdev);
+84 -19
net/bluetooth/hci_sync.c
··· 12 12 #include <net/bluetooth/hci_core.h> 13 13 #include <net/bluetooth/mgmt.h> 14 14 15 - #include "hci_request.h" 16 15 #include "hci_codec.h" 17 16 #include "hci_debugfs.h" 18 17 #include "smp.h" ··· 48 49 wake_up_interruptible(&hdev->req_wait_q); 49 50 } 50 51 51 - static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, 52 - u32 plen, const void *param, 53 - struct sock *sk) 52 + struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen, 53 + const void *param, struct sock *sk) 54 54 { 55 55 int len = HCI_COMMAND_HDR_SIZE + plen; 56 56 struct hci_command_hdr *hdr; ··· 145 147 return 0; 146 148 } 147 149 150 + static void hci_request_init(struct hci_request *req, struct hci_dev *hdev) 151 + { 152 + skb_queue_head_init(&req->cmd_q); 153 + req->hdev = hdev; 154 + req->err = 0; 155 + } 156 + 148 157 /* This function requires the caller holds hdev->req_lock. */ 149 158 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, 150 159 const void *param, u8 event, u32 timeout, ··· 163 158 164 159 bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); 165 160 166 - hci_req_init(&req, hdev); 161 + hci_request_init(&req, hdev); 167 162 168 163 hci_cmd_sync_add(&req, opcode, plen, param, event, sk); 169 164 ··· 352 347 return hci_scan_disable_sync(hdev); 353 348 } 354 349 355 - static int hci_inquiry_sync(struct hci_dev *hdev, u8 length); 356 350 static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) 357 351 { 358 - return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN); 352 + return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0); 359 353 } 360 354 361 355 static void le_scan_disable(struct work_struct *work) ··· 374 370 bt_dev_err(hdev, "failed to disable LE scan: %d", status); 375 371 goto _return; 376 372 } 377 - 378 - hdev->discovery.scan_start = 0; 379 373 380 374 /* If we were running LE only scan, change discovery state. 
If 381 375 * we were running both LE and BR/EDR inquiry simultaneously, ··· 572 570 hci_dev_unlock(hdev); 573 571 } 574 572 573 + static bool is_interleave_scanning(struct hci_dev *hdev) 574 + { 575 + return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; 576 + } 577 + 578 + static int hci_passive_scan_sync(struct hci_dev *hdev); 579 + 580 + static void interleave_scan_work(struct work_struct *work) 581 + { 582 + struct hci_dev *hdev = container_of(work, struct hci_dev, 583 + interleave_scan.work); 584 + unsigned long timeout; 585 + 586 + if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { 587 + timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); 588 + } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { 589 + timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); 590 + } else { 591 + bt_dev_err(hdev, "unexpected error"); 592 + return; 593 + } 594 + 595 + hci_passive_scan_sync(hdev); 596 + 597 + hci_dev_lock(hdev); 598 + 599 + switch (hdev->interleave_scan_state) { 600 + case INTERLEAVE_SCAN_ALLOWLIST: 601 + bt_dev_dbg(hdev, "next state: allowlist"); 602 + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; 603 + break; 604 + case INTERLEAVE_SCAN_NO_FILTER: 605 + bt_dev_dbg(hdev, "next state: no filter"); 606 + hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; 607 + break; 608 + case INTERLEAVE_SCAN_NONE: 609 + bt_dev_err(hdev, "unexpected error"); 610 + } 611 + 612 + hci_dev_unlock(hdev); 613 + 614 + /* Don't continue interleaving if it was canceled */ 615 + if (is_interleave_scanning(hdev)) 616 + queue_delayed_work(hdev->req_workqueue, 617 + &hdev->interleave_scan, timeout); 618 + } 619 + 575 620 void hci_cmd_sync_init(struct hci_dev *hdev) 576 621 { 577 622 INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); ··· 630 581 INIT_WORK(&hdev->reenable_adv_work, reenable_adv); 631 582 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); 632 583 INIT_DELAYED_WORK(&hdev->adv_instance_expire, 
adv_timeout_expire); 584 + INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); 633 585 } 634 586 635 587 static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev, ··· 2162 2112 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; 2163 2113 queue_delayed_work(hdev->req_workqueue, 2164 2114 &hdev->interleave_scan, 0); 2165 - } 2166 - 2167 - static bool is_interleave_scanning(struct hci_dev *hdev) 2168 - { 2169 - return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; 2170 2115 } 2171 2116 2172 2117 static void cancel_interleave_scan(struct hci_dev *hdev) ··· 5062 5017 cancel_delayed_work(&hdev->ncmd_timer); 5063 5018 cancel_delayed_work(&hdev->le_scan_disable); 5064 5019 5065 - hci_request_cancel_all(hdev); 5020 + hci_cmd_sync_cancel_sync(hdev, ENODEV); 5021 + 5022 + cancel_interleave_scan(hdev); 5066 5023 5067 5024 if (hdev->adv_instance_timeout) { 5068 5025 cancel_delayed_work_sync(&hdev->adv_instance_expire); ··· 5711 5664 return hci_update_passive_scan_sync(hdev); 5712 5665 } 5713 5666 5714 - static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) 5667 + int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp) 5715 5668 { 5716 5669 const u8 giac[3] = { 0x33, 0x8b, 0x9e }; 5717 5670 const u8 liac[3] = { 0x00, 0x8b, 0x9e }; ··· 5734 5687 memcpy(&cp.lap, giac, sizeof(cp.lap)); 5735 5688 5736 5689 cp.length = length; 5690 + cp.num_rsp = num_rsp; 5737 5691 5738 5692 return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, 5739 5693 sizeof(cp), &cp, HCI_CMD_TIMEOUT); ··· 5821 5773 if (err) 5822 5774 return err; 5823 5775 5824 - return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); 5776 + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); 5825 5777 } 5826 5778 5827 5779 int hci_start_discovery_sync(struct hci_dev *hdev) ··· 5833 5785 5834 5786 switch (hdev->discovery.type) { 5835 5787 case DISCOV_TYPE_BREDR: 5836 - return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); 5788 + return hci_inquiry_sync(hdev, 
DISCOV_BREDR_INQUIRY_LEN, 0); 5837 5789 case DISCOV_TYPE_INTERLEAVED: 5838 5790 /* When running simultaneous discovery, the LE scanning time 5839 5791 * should occupy the whole discovery time sine BR/EDR inquiry ··· 5903 5855 return err; 5904 5856 5905 5857 hdev->discovery_paused = true; 5906 - hdev->discovery_old_state = old_state; 5907 5858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 5908 5859 5909 5860 return 0; ··· 6770 6723 } 6771 6724 6772 6725 return -ENOENT; 6726 + } 6727 + 6728 + int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, 6729 + struct hci_conn_params *params) 6730 + { 6731 + struct hci_cp_le_conn_update cp; 6732 + 6733 + memset(&cp, 0, sizeof(cp)); 6734 + cp.handle = cpu_to_le16(conn->handle); 6735 + cp.conn_interval_min = cpu_to_le16(params->conn_min_interval); 6736 + cp.conn_interval_max = cpu_to_le16(params->conn_max_interval); 6737 + cp.conn_latency = cpu_to_le16(params->conn_latency); 6738 + cp.supervision_timeout = cpu_to_le16(params->supervision_timeout); 6739 + cp.min_ce_len = cpu_to_le16(0x0000); 6740 + cp.max_ce_len = cpu_to_le16(0x0000); 6741 + 6742 + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, 6743 + sizeof(cp), &cp, HCI_CMD_TIMEOUT); 6773 6744 }
-5
net/bluetooth/iso.c
··· 1720 1720 release_sock(sk); 1721 1721 } 1722 1722 1723 - struct iso_list_data { 1724 - struct hci_conn *hcon; 1725 - int count; 1726 - }; 1727 - 1728 1723 static bool iso_match_big(struct sock *sk, void *data) 1729 1724 { 1730 1725 struct hci_evt_le_big_sync_estabilished *ev = data;
+48 -3
net/bluetooth/mgmt.c
··· 33 33 #include <net/bluetooth/l2cap.h> 34 34 #include <net/bluetooth/mgmt.h> 35 35 36 - #include "hci_request.h" 37 36 #include "smp.h" 38 37 #include "mgmt_util.h" 39 38 #include "mgmt_config.h" ··· 41 42 #include "aosp.h" 42 43 43 44 #define MGMT_VERSION 1 44 - #define MGMT_REVISION 22 45 + #define MGMT_REVISION 23 45 46 46 47 static const u16 mgmt_commands[] = { 47 48 MGMT_OP_READ_INDEX_LIST, ··· 7812 7813 return err; 7813 7814 } 7814 7815 7816 + static int conn_update_sync(struct hci_dev *hdev, void *data) 7817 + { 7818 + struct hci_conn_params *params = data; 7819 + struct hci_conn *conn; 7820 + 7821 + conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type); 7822 + if (!conn) 7823 + return -ECANCELED; 7824 + 7825 + return hci_le_conn_update_sync(hdev, conn, params); 7826 + } 7827 + 7815 7828 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, 7816 7829 u16 len) 7817 7830 { ··· 7857 7846 7858 7847 hci_dev_lock(hdev); 7859 7848 7860 - hci_conn_params_clear_disabled(hdev); 7849 + if (param_count > 1) 7850 + hci_conn_params_clear_disabled(hdev); 7861 7851 7862 7852 for (i = 0; i < param_count; i++) { 7863 7853 struct mgmt_conn_param *param = &cp->params[i]; 7864 7854 struct hci_conn_params *hci_param; 7865 7855 u16 min, max, latency, timeout; 7856 + bool update = false; 7866 7857 u8 addr_type; 7867 7858 7868 7859 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr, ··· 7892 7879 continue; 7893 7880 } 7894 7881 7882 + /* Detect when the loading is for an existing parameter then 7883 + * attempt to trigger the connection update procedure. 
7884 + */ 7885 + if (!i && param_count == 1) { 7886 + hci_param = hci_conn_params_lookup(hdev, 7887 + &param->addr.bdaddr, 7888 + addr_type); 7889 + if (hci_param) 7890 + update = true; 7891 + else 7892 + hci_conn_params_clear_disabled(hdev); 7893 + } 7894 + 7895 7895 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr, 7896 7896 addr_type); 7897 7897 if (!hci_param) { ··· 7916 7890 hci_param->conn_max_interval = max; 7917 7891 hci_param->conn_latency = latency; 7918 7892 hci_param->supervision_timeout = timeout; 7893 + 7894 + /* Check if we need to trigger a connection update */ 7895 + if (update) { 7896 + struct hci_conn *conn; 7897 + 7898 + /* Lookup for existing connection as central and check 7899 + * if parameters match and if they don't then trigger 7900 + * a connection update. 7901 + */ 7902 + conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr, 7903 + addr_type); 7904 + if (conn && conn->role == HCI_ROLE_MASTER && 7905 + (conn->le_conn_min_interval != min || 7906 + conn->le_conn_max_interval != max || 7907 + conn->le_conn_latency != latency || 7908 + conn->le_supv_timeout != timeout)) 7909 + hci_cmd_sync_queue(hdev, conn_update_sync, 7910 + hci_param, NULL); 7911 + } 7919 7912 } 7920 7913 7921 7914 hci_dev_unlock(hdev);
-1
net/bluetooth/msft.c
··· 7 7 #include <net/bluetooth/hci_core.h> 8 8 #include <net/bluetooth/mgmt.h> 9 9 10 - #include "hci_request.h" 11 10 #include "mgmt_util.h" 12 11 #include "msft.h" 13 12
+10 -13
net/bluetooth/rfcomm/tty.c
··· 504 504 struct rfcomm_dev *dev; 505 505 struct rfcomm_dev_list_req *dl; 506 506 struct rfcomm_dev_info *di; 507 - int n = 0, size, err; 507 + int n = 0, err; 508 508 u16 dev_num; 509 509 510 510 BT_DBG(""); ··· 515 515 if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) 516 516 return -EINVAL; 517 517 518 - size = sizeof(*dl) + dev_num * sizeof(*di); 519 - 520 - dl = kzalloc(size, GFP_KERNEL); 518 + dl = kzalloc(struct_size(dl, dev_info, dev_num), GFP_KERNEL); 521 519 if (!dl) 522 520 return -ENOMEM; 523 521 522 + dl->dev_num = dev_num; 524 523 di = dl->dev_info; 525 524 526 525 mutex_lock(&rfcomm_dev_lock); ··· 527 528 list_for_each_entry(dev, &rfcomm_dev_list, list) { 528 529 if (!tty_port_get(&dev->port)) 529 530 continue; 530 - (di + n)->id = dev->id; 531 - (di + n)->flags = dev->flags; 532 - (di + n)->state = dev->dlc->state; 533 - (di + n)->channel = dev->channel; 534 - bacpy(&(di + n)->src, &dev->src); 535 - bacpy(&(di + n)->dst, &dev->dst); 531 + di[n].id = dev->id; 532 + di[n].flags = dev->flags; 533 + di[n].state = dev->dlc->state; 534 + di[n].channel = dev->channel; 535 + bacpy(&di[n].src, &dev->src); 536 + bacpy(&di[n].dst, &dev->dst); 536 537 tty_port_put(&dev->port); 537 538 if (++n >= dev_num) 538 539 break; ··· 541 542 mutex_unlock(&rfcomm_dev_lock); 542 543 543 544 dl->dev_num = n; 544 - size = sizeof(*dl) + n * sizeof(*di); 545 - 546 - err = copy_to_user(arg, dl, size); 545 + err = copy_to_user(arg, dl, struct_size(dl, dev_info, n)); 547 546 kfree(dl); 548 547 549 548 return err ? -EFAULT : 0;