Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/holtmann/bluetooth-next-2.6

+4190 -193
+2
Documentation/00-INDEX
··· 82 82 - info on the Block I/O (BIO) layer. 83 83 blockdev/ 84 84 - info on block devices & drivers 85 + btmrvl.txt 86 + - info on Marvell Bluetooth driver usage. 85 87 cachetlb.txt 86 88 - describes the cache/TLB flushing interfaces Linux uses. 87 89 cdrom/
+119
Documentation/btmrvl.txt
··· 1 + ======================================================================= 2 + README for btmrvl driver 3 + ======================================================================= 4 + 5 + 6 + All commands are used via debugfs interface. 7 + 8 + ===================== 9 + Set/get driver configurations: 10 + 11 + Path: /debug/btmrvl/config/ 12 + 13 + gpiogap=[n] 14 + hscfgcmd 15 + These commands are used to configure the host sleep parameters. 16 + bit 7:0 -- Gap 17 + bit 15:8 -- GPIO 18 + 19 + where GPIO is the pin number of GPIO used to wake up the host. 20 + It could be any valid GPIO pin# (e.g. 0-7) or 0xff (SDIO interface 21 + wakeup will be used instead). 22 + 23 + where Gap is the gap in milliseconds between wakeup signal and 24 + wakeup event, or 0xff for special host sleep setting. 25 + 26 + Usage: 27 + # Use SDIO interface to wake up the host and set GAP to 0x80: 28 + echo 0xff80 > /debug/btmrvl/config/gpiogap 29 + echo 1 > /debug/btmrvl/config/hscfgcmd 30 + 31 + # Use GPIO pin #3 to wake up the host and set GAP to 0xff: 32 + echo 0x03ff > /debug/btmrvl/config/gpiogap 33 + echo 1 > /debug/btmrvl/config/hscfgcmd 34 + 35 + psmode=[n] 36 + pscmd 37 + These commands are used to enable/disable auto sleep mode 38 + 39 + where the option is: 40 + 1 -- Enable auto sleep mode 41 + 0 -- Disable auto sleep mode 42 + 43 + Usage: 44 + # Enable auto sleep mode 45 + echo 1 > /debug/btmrvl/config/psmode 46 + echo 1 > /debug/btmrvl/config/pscmd 47 + 48 + # Disable auto sleep mode 49 + echo 0 > /debug/btmrvl/config/psmode 50 + echo 1 > /debug/btmrvl/config/pscmd 51 + 52 + 53 + hsmode=[n] 54 + hscmd 55 + These commands are used to enable host sleep or wake up firmware 56 + 57 + where the option is: 58 + 1 -- Enable host sleep 59 + 0 -- Wake up firmware 60 + 61 + Usage: 62 + # Enable host sleep 63 + echo 1 > /debug/btmrvl/config/hsmode 64 + echo 1 > /debug/btmrvl/config/hscmd 65 + 66 + # Wake up firmware 67 + echo 0 > /debug/btmrvl/config/hsmode 68 + echo 1 > 
/debug/btmrvl/config/hscmd 69 + 70 + 71 + ====================== 72 + Get driver status: 73 + 74 + Path: /debug/btmrvl/status/ 75 + 76 + Usage: 77 + cat /debug/btmrvl/status/<args> 78 + 79 + where the args are: 80 + 81 + curpsmode 82 + This command displays current auto sleep status. 83 + 84 + psstate 85 + This command displays the power save state. 86 + 87 + hsstate 88 + This command displays the host sleep state. 89 + 90 + txdnldrdy 91 + This command displays the value of Tx download ready flag. 92 + 93 + 94 + ===================== 95 + 96 + Use hcitool to issue a raw HCI command; refer to the hcitool manual 97 + 98 + Usage: hcitool cmd <ogf> <ocf> [Parameters] 99 + 100 + Interface Control Command 101 + hcitool cmd 0x3f 0x5b 0xf5 0x01 0x00 --Enable All interfaces 102 + hcitool cmd 0x3f 0x5b 0xf5 0x01 0x01 --Enable Wlan interface 103 + hcitool cmd 0x3f 0x5b 0xf5 0x01 0x02 --Enable BT interface 104 + hcitool cmd 0x3f 0x5b 0xf5 0x00 0x00 --Disable All interfaces 105 + hcitool cmd 0x3f 0x5b 0xf5 0x00 0x01 --Disable Wlan interface 106 + hcitool cmd 0x3f 0x5b 0xf5 0x00 0x02 --Disable BT interface 107 + 108 + ======================================================================= 109 + 110 + 111 + SD8688 firmware: 112 + 113 + /lib/firmware/sd8688_helper.bin 114 + /lib/firmware/sd8688.bin 115 + 116 + 117 + The images can be downloaded from: 118 + 119 + git.infradead.org/users/dwmw2/linux-firmware.git/libertas/
+25
drivers/bluetooth/Kconfig
··· 170 170 Say Y here to compile support for virtual HCI devices into the 171 171 kernel or say M to compile it as module (hci_vhci). 172 172 173 + config BT_MRVL 174 + tristate "Marvell Bluetooth driver support" 175 + help 176 + The core driver to support Marvell Bluetooth devices. 177 + 178 + This driver is required if you want to support 179 + Marvell Bluetooth devices, such as 8688. 180 + 181 + Say Y here to compile Marvell Bluetooth driver 182 + into the kernel or say M to compile it as module. 183 + 184 + config BT_MRVL_SDIO 185 + tristate "Marvell BT-over-SDIO driver" 186 + depends on BT_MRVL && MMC 187 + select FW_LOADER 188 + help 189 + The driver for Marvell Bluetooth chipsets with SDIO interface. 190 + 191 + This driver is required if you want to use Marvell Bluetooth 192 + devices with SDIO interface. Currently only SD8688 chipset is 193 + supported. 194 + 195 + Say Y here to compile support for Marvell BT-over-SDIO driver 196 + into the kernel or say M to compile it as module. 197 + 173 198 endmenu 174 199
+6
drivers/bluetooth/Makefile
··· 15 15 obj-$(CONFIG_BT_HCIBTUSB) += btusb.o 16 16 obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o 17 17 18 + obj-$(CONFIG_BT_MRVL) += btmrvl.o 19 + obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o 20 + 21 + btmrvl-y := btmrvl_main.o 22 + btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o 23 + 18 24 hci_uart-y := hci_ldisc.o 19 25 hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o 20 26 hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
+432
drivers/bluetooth/btmrvl_debugfs.c
··· 1 + /** 2 + * Marvell Bluetooth driver: debugfs related functions 3 + * 4 + * Copyright (C) 2009, Marvell International Ltd. 5 + * 6 + * This software file (the "File") is distributed by Marvell International 7 + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 8 + * (the "License"). You may use, redistribute and/or modify this File in 9 + * accordance with the terms and conditions of the License, a copy of which 10 + * is available by writing to the Free Software Foundation, Inc., 11 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the 12 + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 13 + * 14 + * 15 + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE 16 + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE 17 + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about 18 + * this warranty disclaimer. 19 + **/ 20 + 21 + #include <linux/debugfs.h> 22 + 23 + #include <net/bluetooth/bluetooth.h> 24 + #include <net/bluetooth/hci_core.h> 25 + 26 + #include "btmrvl_drv.h" 27 + 28 + struct btmrvl_debugfs_data { 29 + struct dentry *root_dir, *config_dir, *status_dir; 30 + 31 + /* config */ 32 + struct dentry *drvdbg; 33 + struct dentry *psmode; 34 + struct dentry *pscmd; 35 + struct dentry *hsmode; 36 + struct dentry *hscmd; 37 + struct dentry *gpiogap; 38 + struct dentry *hscfgcmd; 39 + 40 + /* status */ 41 + struct dentry *curpsmode; 42 + struct dentry *hsstate; 43 + struct dentry *psstate; 44 + struct dentry *txdnldready; 45 + }; 46 + 47 + static int btmrvl_open_generic(struct inode *inode, struct file *file) 48 + { 49 + file->private_data = inode->i_private; 50 + return 0; 51 + } 52 + 53 + static ssize_t btmrvl_hscfgcmd_write(struct file *file, 54 + const char __user *ubuf, size_t count, loff_t *ppos) 55 + { 56 + struct btmrvl_private *priv = file->private_data; 57 + char buf[16]; 58 + long result, ret; 59 + 60 + memset(buf, 0, 
sizeof(buf)); 61 + 62 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 63 + return -EFAULT; 64 + 65 + ret = strict_strtol(buf, 10, &result); 66 + 67 + priv->btmrvl_dev.hscfgcmd = result; 68 + 69 + if (priv->btmrvl_dev.hscfgcmd) { 70 + btmrvl_prepare_command(priv); 71 + wake_up_interruptible(&priv->main_thread.wait_q); 72 + } 73 + 74 + return count; 75 + } 76 + 77 + static ssize_t btmrvl_hscfgcmd_read(struct file *file, char __user *userbuf, 78 + size_t count, loff_t *ppos) 79 + { 80 + struct btmrvl_private *priv = file->private_data; 81 + char buf[16]; 82 + int ret; 83 + 84 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", 85 + priv->btmrvl_dev.hscfgcmd); 86 + 87 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 88 + } 89 + 90 + static const struct file_operations btmrvl_hscfgcmd_fops = { 91 + .read = btmrvl_hscfgcmd_read, 92 + .write = btmrvl_hscfgcmd_write, 93 + .open = btmrvl_open_generic, 94 + }; 95 + 96 + static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf, 97 + size_t count, loff_t *ppos) 98 + { 99 + struct btmrvl_private *priv = file->private_data; 100 + char buf[16]; 101 + long result, ret; 102 + 103 + memset(buf, 0, sizeof(buf)); 104 + 105 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 106 + return -EFAULT; 107 + 108 + ret = strict_strtol(buf, 10, &result); 109 + 110 + priv->btmrvl_dev.psmode = result; 111 + 112 + return count; 113 + } 114 + 115 + static ssize_t btmrvl_psmode_read(struct file *file, char __user *userbuf, 116 + size_t count, loff_t *ppos) 117 + { 118 + struct btmrvl_private *priv = file->private_data; 119 + char buf[16]; 120 + int ret; 121 + 122 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", 123 + priv->btmrvl_dev.psmode); 124 + 125 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 126 + } 127 + 128 + static const struct file_operations btmrvl_psmode_fops = { 129 + .read = btmrvl_psmode_read, 130 + .write = btmrvl_psmode_write, 131 + 
.open = btmrvl_open_generic, 132 + }; 133 + 134 + static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, 135 + size_t count, loff_t *ppos) 136 + { 137 + struct btmrvl_private *priv = file->private_data; 138 + char buf[16]; 139 + long result, ret; 140 + 141 + memset(buf, 0, sizeof(buf)); 142 + 143 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 144 + return -EFAULT; 145 + 146 + ret = strict_strtol(buf, 10, &result); 147 + 148 + priv->btmrvl_dev.pscmd = result; 149 + 150 + if (priv->btmrvl_dev.pscmd) { 151 + btmrvl_prepare_command(priv); 152 + wake_up_interruptible(&priv->main_thread.wait_q); 153 + } 154 + 155 + return count; 156 + 157 + } 158 + 159 + static ssize_t btmrvl_pscmd_read(struct file *file, char __user *userbuf, 160 + size_t count, loff_t *ppos) 161 + { 162 + struct btmrvl_private *priv = file->private_data; 163 + char buf[16]; 164 + int ret; 165 + 166 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.pscmd); 167 + 168 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 169 + } 170 + 171 + static const struct file_operations btmrvl_pscmd_fops = { 172 + .read = btmrvl_pscmd_read, 173 + .write = btmrvl_pscmd_write, 174 + .open = btmrvl_open_generic, 175 + }; 176 + 177 + static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf, 178 + size_t count, loff_t *ppos) 179 + { 180 + struct btmrvl_private *priv = file->private_data; 181 + char buf[16]; 182 + long result, ret; 183 + 184 + memset(buf, 0, sizeof(buf)); 185 + 186 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 187 + return -EFAULT; 188 + 189 + ret = strict_strtol(buf, 16, &result); 190 + 191 + priv->btmrvl_dev.gpio_gap = result; 192 + 193 + return count; 194 + } 195 + 196 + static ssize_t btmrvl_gpiogap_read(struct file *file, char __user *userbuf, 197 + size_t count, loff_t *ppos) 198 + { 199 + struct btmrvl_private *priv = file->private_data; 200 + char buf[16]; 201 + int ret; 202 
+ 203 + ret = snprintf(buf, sizeof(buf) - 1, "0x%x\n", 204 + priv->btmrvl_dev.gpio_gap); 205 + 206 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 207 + } 208 + 209 + static const struct file_operations btmrvl_gpiogap_fops = { 210 + .read = btmrvl_gpiogap_read, 211 + .write = btmrvl_gpiogap_write, 212 + .open = btmrvl_open_generic, 213 + }; 214 + 215 + static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, 216 + size_t count, loff_t *ppos) 217 + { 218 + struct btmrvl_private *priv = (struct btmrvl_private *) file->private_data; 219 + char buf[16]; 220 + long result, ret; 221 + 222 + memset(buf, 0, sizeof(buf)); 223 + 224 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 225 + return -EFAULT; 226 + 227 + ret = strict_strtol(buf, 10, &result); 228 + 229 + priv->btmrvl_dev.hscmd = result; 230 + if (priv->btmrvl_dev.hscmd) { 231 + btmrvl_prepare_command(priv); 232 + wake_up_interruptible(&priv->main_thread.wait_q); 233 + } 234 + 235 + return count; 236 + } 237 + 238 + static ssize_t btmrvl_hscmd_read(struct file *file, char __user *userbuf, 239 + size_t count, loff_t *ppos) 240 + { 241 + struct btmrvl_private *priv = file->private_data; 242 + char buf[16]; 243 + int ret; 244 + 245 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscmd); 246 + 247 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 248 + } 249 + 250 + static const struct file_operations btmrvl_hscmd_fops = { 251 + .read = btmrvl_hscmd_read, 252 + .write = btmrvl_hscmd_write, 253 + .open = btmrvl_open_generic, 254 + }; 255 + 256 + static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf, 257 + size_t count, loff_t *ppos) 258 + { 259 + struct btmrvl_private *priv = file->private_data; 260 + char buf[16]; 261 + long result, ret; 262 + 263 + memset(buf, 0, sizeof(buf)); 264 + 265 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 266 + return -EFAULT; 267 + 268 + ret = 
strict_strtol(buf, 10, &result); 269 + 270 + priv->btmrvl_dev.hsmode = result; 271 + 272 + return count; 273 + } 274 + 275 + static ssize_t btmrvl_hsmode_read(struct file *file, char __user * userbuf, 276 + size_t count, loff_t *ppos) 277 + { 278 + struct btmrvl_private *priv = file->private_data; 279 + char buf[16]; 280 + int ret; 281 + 282 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hsmode); 283 + 284 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 285 + } 286 + 287 + static const struct file_operations btmrvl_hsmode_fops = { 288 + .read = btmrvl_hsmode_read, 289 + .write = btmrvl_hsmode_write, 290 + .open = btmrvl_open_generic, 291 + }; 292 + 293 + static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf, 294 + size_t count, loff_t *ppos) 295 + { 296 + struct btmrvl_private *priv = file->private_data; 297 + char buf[16]; 298 + int ret; 299 + 300 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->psmode); 301 + 302 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 303 + } 304 + 305 + static const struct file_operations btmrvl_curpsmode_fops = { 306 + .read = btmrvl_curpsmode_read, 307 + .open = btmrvl_open_generic, 308 + }; 309 + 310 + static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf, 311 + size_t count, loff_t *ppos) 312 + { 313 + struct btmrvl_private *priv = file->private_data; 314 + char buf[16]; 315 + int ret; 316 + 317 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->ps_state); 318 + 319 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 320 + } 321 + 322 + static const struct file_operations btmrvl_psstate_fops = { 323 + .read = btmrvl_psstate_read, 324 + .open = btmrvl_open_generic, 325 + }; 326 + 327 + static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf, 328 + size_t count, loff_t *ppos) 329 + { 330 + struct btmrvl_private *priv = file->private_data; 331 + char buf[16]; 332 + int ret; 333 + 334 
+ ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->hs_state); 335 + 336 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 337 + } 338 + 339 + static const struct file_operations btmrvl_hsstate_fops = { 340 + .read = btmrvl_hsstate_read, 341 + .open = btmrvl_open_generic, 342 + }; 343 + 344 + static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf, 345 + size_t count, loff_t *ppos) 346 + { 347 + struct btmrvl_private *priv = file->private_data; 348 + char buf[16]; 349 + int ret; 350 + 351 + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", 352 + priv->btmrvl_dev.tx_dnld_rdy); 353 + 354 + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); 355 + } 356 + 357 + static const struct file_operations btmrvl_txdnldready_fops = { 358 + .read = btmrvl_txdnldready_read, 359 + .open = btmrvl_open_generic, 360 + }; 361 + 362 + void btmrvl_debugfs_init(struct hci_dev *hdev) 363 + { 364 + struct btmrvl_private *priv = hdev->driver_data; 365 + struct btmrvl_debugfs_data *dbg; 366 + 367 + dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); 368 + priv->debugfs_data = dbg; 369 + 370 + if (!dbg) { 371 + BT_ERR("Can not allocate memory for btmrvl_debugfs_data."); 372 + return; 373 + } 374 + 375 + dbg->root_dir = debugfs_create_dir("btmrvl", NULL); 376 + 377 + dbg->config_dir = debugfs_create_dir("config", dbg->root_dir); 378 + 379 + dbg->psmode = debugfs_create_file("psmode", 0644, dbg->config_dir, 380 + hdev->driver_data, &btmrvl_psmode_fops); 381 + dbg->pscmd = debugfs_create_file("pscmd", 0644, dbg->config_dir, 382 + hdev->driver_data, &btmrvl_pscmd_fops); 383 + dbg->gpiogap = debugfs_create_file("gpiogap", 0644, dbg->config_dir, 384 + hdev->driver_data, &btmrvl_gpiogap_fops); 385 + dbg->hsmode = debugfs_create_file("hsmode", 0644, dbg->config_dir, 386 + hdev->driver_data, &btmrvl_hsmode_fops); 387 + dbg->hscmd = debugfs_create_file("hscmd", 0644, dbg->config_dir, 388 + hdev->driver_data, &btmrvl_hscmd_fops); 389 + dbg->hscfgcmd = 
debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, 390 + hdev->driver_data, &btmrvl_hscfgcmd_fops); 391 + 392 + dbg->status_dir = debugfs_create_dir("status", dbg->root_dir); 393 + dbg->curpsmode = debugfs_create_file("curpsmode", 0444, 394 + dbg->status_dir, 395 + hdev->driver_data, 396 + &btmrvl_curpsmode_fops); 397 + dbg->psstate = debugfs_create_file("psstate", 0444, dbg->status_dir, 398 + hdev->driver_data, &btmrvl_psstate_fops); 399 + dbg->hsstate = debugfs_create_file("hsstate", 0444, dbg->status_dir, 400 + hdev->driver_data, &btmrvl_hsstate_fops); 401 + dbg->txdnldready = debugfs_create_file("txdnldready", 0444, 402 + dbg->status_dir, 403 + hdev->driver_data, 404 + &btmrvl_txdnldready_fops); 405 + } 406 + 407 + void btmrvl_debugfs_remove(struct hci_dev *hdev) 408 + { 409 + struct btmrvl_private *priv = hdev->driver_data; 410 + struct btmrvl_debugfs_data *dbg = priv->debugfs_data; 411 + 412 + if (!dbg) 413 + return; 414 + 415 + debugfs_remove(dbg->psmode); 416 + debugfs_remove(dbg->pscmd); 417 + debugfs_remove(dbg->gpiogap); 418 + debugfs_remove(dbg->hsmode); 419 + debugfs_remove(dbg->hscmd); 420 + debugfs_remove(dbg->hscfgcmd); 421 + debugfs_remove(dbg->config_dir); 422 + 423 + debugfs_remove(dbg->curpsmode); 424 + debugfs_remove(dbg->psstate); 425 + debugfs_remove(dbg->hsstate); 426 + debugfs_remove(dbg->txdnldready); 427 + debugfs_remove(dbg->status_dir); 428 + 429 + debugfs_remove(dbg->root_dir); 430 + 431 + kfree(dbg); 432 + }
+139
drivers/bluetooth/btmrvl_drv.h
··· 1 + /* 2 + * Marvell Bluetooth driver: global definitions & declarations 3 + * 4 + * Copyright (C) 2009, Marvell International Ltd. 5 + * 6 + * This software file (the "File") is distributed by Marvell International 7 + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 8 + * (the "License"). You may use, redistribute and/or modify this File in 9 + * accordance with the terms and conditions of the License, a copy of which 10 + * is available by writing to the Free Software Foundation, Inc., 11 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the 12 + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 13 + * 14 + * 15 + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE 16 + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE 17 + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about 18 + * this warranty disclaimer. 19 + * 20 + */ 21 + 22 + #include <linux/kthread.h> 23 + #include <linux/bitops.h> 24 + #include <net/bluetooth/bluetooth.h> 25 + 26 + #define BTM_HEADER_LEN 4 27 + #define BTM_UPLD_SIZE 2312 28 + 29 + /* Time to wait until Host Sleep state change in millisecond */ 30 + #define WAIT_UNTIL_HS_STATE_CHANGED 5000 31 + /* Time to wait for command response in millisecond */ 32 + #define WAIT_UNTIL_CMD_RESP 5000 33 + 34 + struct btmrvl_thread { 35 + struct task_struct *task; 36 + wait_queue_head_t wait_q; 37 + void *priv; 38 + }; 39 + 40 + struct btmrvl_device { 41 + void *card; 42 + struct hci_dev *hcidev; 43 + 44 + u8 tx_dnld_rdy; 45 + 46 + u8 psmode; 47 + u8 pscmd; 48 + u8 hsmode; 49 + u8 hscmd; 50 + 51 + /* Low byte is gap, high byte is GPIO */ 52 + u16 gpio_gap; 53 + 54 + u8 hscfgcmd; 55 + u8 sendcmdflag; 56 + }; 57 + 58 + struct btmrvl_adapter { 59 + u32 int_count; 60 + struct sk_buff_head tx_queue; 61 + u8 psmode; 62 + u8 ps_state; 63 + u8 hs_state; 64 + u8 wakeup_tries; 65 + wait_queue_head_t cmd_wait_q; 66 + u8 
cmd_complete; 67 + }; 68 + 69 + struct btmrvl_private { 70 + struct btmrvl_device btmrvl_dev; 71 + struct btmrvl_adapter *adapter; 72 + struct btmrvl_thread main_thread; 73 + int (*hw_host_to_card) (struct btmrvl_private *priv, 74 + u8 *payload, u16 nb); 75 + int (*hw_wakeup_firmware) (struct btmrvl_private *priv); 76 + spinlock_t driver_lock; /* spinlock used by driver */ 77 + #ifdef CONFIG_DEBUG_FS 78 + void *debugfs_data; 79 + #endif 80 + }; 81 + 82 + #define MRVL_VENDOR_PKT 0xFE 83 + 84 + /* Bluetooth commands */ 85 + #define BT_CMD_AUTO_SLEEP_MODE 0x23 86 + #define BT_CMD_HOST_SLEEP_CONFIG 0x59 87 + #define BT_CMD_HOST_SLEEP_ENABLE 0x5A 88 + #define BT_CMD_MODULE_CFG_REQ 0x5B 89 + 90 + /* Sub-commands: Module Bringup/Shutdown Request */ 91 + #define MODULE_BRINGUP_REQ 0xF1 92 + #define MODULE_SHUTDOWN_REQ 0xF2 93 + 94 + #define BT_EVENT_POWER_STATE 0x20 95 + 96 + /* Bluetooth Power States */ 97 + #define BT_PS_ENABLE 0x02 98 + #define BT_PS_DISABLE 0x03 99 + #define BT_PS_SLEEP 0x01 100 + 101 + #define OGF 0x3F 102 + 103 + /* Host Sleep states */ 104 + #define HS_ACTIVATED 0x01 105 + #define HS_DEACTIVATED 0x00 106 + 107 + /* Power Save modes */ 108 + #define PS_SLEEP 0x01 109 + #define PS_AWAKE 0x00 110 + 111 + struct btmrvl_cmd { 112 + __le16 ocf_ogf; 113 + u8 length; 114 + u8 data[4]; 115 + } __attribute__ ((packed)); 116 + 117 + struct btmrvl_event { 118 + u8 ec; /* event counter */ 119 + u8 length; 120 + u8 data[4]; 121 + } __attribute__ ((packed)); 122 + 123 + /* Prototype of global function */ 124 + 125 + struct btmrvl_private *btmrvl_add_card(void *card); 126 + int btmrvl_remove_card(struct btmrvl_private *priv); 127 + 128 + void btmrvl_interrupt(struct btmrvl_private *priv); 129 + 130 + void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb); 131 + int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb); 132 + 133 + int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd); 134 + int 
btmrvl_prepare_command(struct btmrvl_private *priv); 135 + 136 + #ifdef CONFIG_DEBUG_FS 137 + void btmrvl_debugfs_init(struct hci_dev *hdev); 138 + void btmrvl_debugfs_remove(struct hci_dev *hdev); 139 + #endif
+624
drivers/bluetooth/btmrvl_main.c
/**
 * Marvell Bluetooth driver
 *
 * Copyright (C) 2009, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 **/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btmrvl_drv.h"

#define VERSION "1.0"

/*
 * This function is called by interface specific interrupt handler.
 * It updates Power Save & Host Sleep states, and wakes up the main
 * thread.
 */
void btmrvl_interrupt(struct btmrvl_private *priv)
{
	priv->adapter->ps_state = PS_AWAKE;

	/* An interrupt means the card is responsive again. */
	priv->adapter->wakeup_tries = 0;

	priv->adapter->int_count++;

	wake_up_interruptible(&priv->main_thread.wait_q);
}
EXPORT_SYMBOL_GPL(btmrvl_interrupt);

/*
 * Inspect an incoming HCI event packet; if it is the command-complete
 * event for a MODULE_CFG_REQ we sent (sendcmdflag set), mark the
 * command done and wake the waiter in btmrvl_send_module_cfg_cmd().
 */
void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	struct hci_ev_cmd_complete *ec;
	u16 opcode, ocf;

	if (hdr->evt == HCI_EV_CMD_COMPLETE) {
		ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
		opcode = __le16_to_cpu(ec->opcode);
		ocf = hci_opcode_ocf(opcode);
		if (ocf == BT_CMD_MODULE_CFG_REQ &&
					priv->btmrvl_dev.sendcmdflag) {
			priv->btmrvl_dev.sendcmdflag = false;
			priv->adapter->cmd_complete = true;
			wake_up_interruptible(&priv->adapter->cmd_wait_q);
		}
	}
}
EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);

/*
 * Handle a Marvell vendor event (event counter 0xff) and update the
 * adapter's power-save / host-sleep state accordingly.  Returns 0 when
 * the event was consumed (skb is freed here); non-zero when the packet
 * should instead be handled elsewhere.
 *
 * NOTE(review): ret is declared u8, so the -EINVAL assignments are
 * truncated to a positive 8-bit value before being returned as int.
 * Callers testing "< 0" would never see an error; verify callers only
 * test for non-zero, or change ret to int.
 */
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
{
	struct btmrvl_adapter *adapter = priv->adapter;
	struct btmrvl_event *event;
	u8 ret = 0;

	event = (struct btmrvl_event *) skb->data;
	if (event->ec != 0xff) {
		BT_DBG("Not Marvell Event=%x", event->ec);
		ret = -EINVAL;
		goto exit;
	}

	switch (event->data[0]) {
	case BT_CMD_AUTO_SLEEP_MODE:
		/* data[2] is the command status; 0 means success. */
		if (!event->data[2]) {
			if (event->data[1] == BT_PS_ENABLE)
				adapter->psmode = 1;
			else
				adapter->psmode = 0;
			BT_DBG("PS Mode:%s",
				(adapter->psmode) ? "Enable" : "Disable");
		} else {
			BT_DBG("PS Mode command failed");
		}
		break;

	case BT_CMD_HOST_SLEEP_CONFIG:
		if (!event->data[3])
			BT_DBG("gpio=%x, gap=%x", event->data[1],
							event->data[2]);
		else
			BT_DBG("HSCFG command failed");
		break;

	case BT_CMD_HOST_SLEEP_ENABLE:
		if (!event->data[1]) {
			adapter->hs_state = HS_ACTIVATED;
			if (adapter->psmode)
				adapter->ps_state = PS_SLEEP;
			/* Wake btmrvl_enable_hs(), which waits on hs_state. */
			wake_up_interruptible(&adapter->cmd_wait_q);
			BT_DBG("HS ACTIVATED!");
		} else {
			BT_DBG("HS Enable failed");
		}
		break;

	case BT_CMD_MODULE_CFG_REQ:
		if (priv->btmrvl_dev.sendcmdflag &&
				event->data[1] == MODULE_BRINGUP_REQ) {
			BT_DBG("EVENT:%s", (event->data[2]) ?
				"Bring-up failed" : "Bring-up succeed");
		} else if (priv->btmrvl_dev.sendcmdflag &&
				event->data[1] == MODULE_SHUTDOWN_REQ) {
			BT_DBG("EVENT:%s", (event->data[2]) ?
				"Shutdown failed" : "Shutdown succeed");
		} else {
			/* Not ours: let the response reach the application. */
			BT_DBG("BT_CMD_MODULE_CFG_REQ resp for APP");
			ret = -EINVAL;
		}
		break;

	case BT_EVENT_POWER_STATE:
		if (event->data[1] == BT_PS_SLEEP)
			adapter->ps_state = PS_SLEEP;
		BT_DBG("EVENT:%s",
			(adapter->ps_state) ? "PS_SLEEP" : "PS_AWAKE");
		break;

	default:
		BT_DBG("Unknown Event=%d", event->data[0]);
		ret = -EINVAL;
		break;
	}

exit:
	/* Consumed events are freed here; rejected ones stay with caller. */
	if (!ret)
		kfree_skb(skb);

	return ret;
}
EXPORT_SYMBOL_GPL(btmrvl_process_event);

/*
 * Queue a MODULE_CFG_REQ vendor command (bring-up/shutdown subcmd) at
 * the head of the tx queue, wake the main thread, and wait for the
 * command-complete event (see btmrvl_check_evtpkt()).
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure.
 */
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
{
	struct sk_buff *skb;
	struct btmrvl_cmd *cmd;
	int ret = 0;

	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
	if (skb == NULL) {
		BT_ERR("No free skb");
		return -ENOMEM;
	}

	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
	cmd->length = 1;
	cmd->data[0] = subcmd;

	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;

	skb->dev = (void *) priv->btmrvl_dev.hcidev;
	/* Queue at the head so the command goes out before pending data. */
	skb_queue_head(&priv->adapter->tx_queue, skb);

	priv->btmrvl_dev.sendcmdflag = true;

	priv->adapter->cmd_complete = false;

	BT_DBG("Queue module cfg Command");

	wake_up_interruptible(&priv->main_thread.wait_q);

	if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
				priv->adapter->cmd_complete,
				msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
		ret = -ETIMEDOUT;
		BT_ERR("module_cfg_cmd(%x): timeout: %d",
					subcmd, priv->btmrvl_dev.sendcmdflag);
	}

	BT_DBG("module cfg Command done");

	return ret;
}
EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);

/*
 * Queue a HOST_SLEEP_ENABLE vendor command and wait until the firmware
 * event handler flips adapter->hs_state to HS_ACTIVATED (see
 * btmrvl_process_event()).  Returns 0, -ENOMEM or -ETIMEDOUT.
 */
static int btmrvl_enable_hs(struct btmrvl_private *priv)
{
	struct sk_buff *skb;
	struct btmrvl_cmd *cmd;
	int ret = 0;

	skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
	if (skb == NULL) {
		BT_ERR("No free skb");
		return -ENOMEM;
	}

	cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
	cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
	cmd->length = 0;

	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;

	skb->dev = (void *) priv->btmrvl_dev.hcidev;
	skb_queue_head(&priv->adapter->tx_queue, skb);

	BT_DBG("Queue hs enable Command");

	wake_up_interruptible(&priv->main_thread.wait_q);

	if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
					priv->adapter->hs_state,
			msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
		ret = -ETIMEDOUT;
		BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
						priv->adapter->ps_state,
						priv->adapter->wakeup_tries);
	}

	return ret;
}

/*
 * Act on the pending debugfs-set command flags (hscfgcmd, pscmd, hscmd):
 * build and queue the corresponding vendor command, or wake the
 * firmware.  Each flag is cleared before it is processed.  Called from
 * the debugfs write handlers and the main thread.
 */
int btmrvl_prepare_command(struct btmrvl_private *priv)
{
	struct sk_buff *skb = NULL;
	struct btmrvl_cmd *cmd;
	int ret = 0;

	if (priv->btmrvl_dev.hscfgcmd) {
		priv->btmrvl_dev.hscfgcmd = 0;

		skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
		if (skb == NULL) {
			BT_ERR("No free skb");
			return -ENOMEM;
		}

		cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
		cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_CONFIG));
		cmd->length = 2;
		/* gpio_gap: high byte is the GPIO pin, low byte is the gap. */
		cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
		cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);

		bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;

		skb->dev = (void *) priv->btmrvl_dev.hcidev;
		skb_queue_head(&priv->adapter->tx_queue, skb);

		BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x",
						cmd->data[0], cmd->data[1]);
	}

	if (priv->btmrvl_dev.pscmd) {
		priv->btmrvl_dev.pscmd = 0;

		skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
		if (skb == NULL) {
			BT_ERR("No free skb");
			return -ENOMEM;
		}

		cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
		cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_AUTO_SLEEP_MODE));
		cmd->length = 1;

		if (priv->btmrvl_dev.psmode)
			cmd->data[0] = BT_PS_ENABLE;
		else
			cmd->data[0] = BT_PS_DISABLE;

		bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;

		skb->dev = (void *) priv->btmrvl_dev.hcidev;
		skb_queue_head(&priv->adapter->tx_queue, skb);

		BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
	}

	if (priv->btmrvl_dev.hscmd) {
		priv->btmrvl_dev.hscmd = 0;

		if (priv->btmrvl_dev.hsmode) {
			ret = btmrvl_enable_hs(priv);
		} else {
			ret = priv->hw_wakeup_firmware(priv);
			priv->adapter->hs_state = HS_DEACTIVATED;
		}
	}

	return ret;
}

/*
 * Prepend the 4-byte transport header and hand the packet to the
 * interface driver via hw_host_to_card().
 */
static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
	int ret = 0;

	if (!skb || !skb->data)
		return -EINVAL;

	if (!skb->len || ((skb->len + BTM_HEADER_LEN) > BTM_UPLD_SIZE)) {
		BT_ERR("Tx Error: Bad skb length %d : %d",
						skb->len, BTM_UPLD_SIZE);
		return -EINVAL;
	}

	if (skb_headroom(skb) < BTM_HEADER_LEN) {
		struct sk_buff *tmp = skb;

		skb = skb_realloc_headroom(skb, BTM_HEADER_LEN);
		if (!skb) {
			BT_ERR("Tx Error: realloc_headroom failed %d",
				BTM_HEADER_LEN);
			/* NOTE(review): the original skb is neither sent nor
			 * freed here -- presumably the caller frees it on
			 * error; confirm against the main-thread loop.
			 */
			skb = tmp;
			return -EINVAL;
		}

		kfree_skb(tmp);
	}

	skb_push(skb, BTM_HEADER_LEN);

	/* header type: byte[3]
	 * HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor
	 * header length: byte[2][1][0]
	 */

	skb->data[0] = (skb->len & 0x0000ff);
	skb->data[1] = (skb->len & 0x00ff00) >> 8;
	skb->data[2] = (skb->len & 0xff0000) >> 16;
	skb->data[3] = bt_cb(skb)->pkt_type;

	if (priv->hw_host_to_card)
		ret = priv->hw_host_to_card(priv, skb->data, skb->len);

	return ret;
}

/* Initialize the adapter: empty tx queue, awake, and the wait queue the
 * command senders sleep on.
 */
static void btmrvl_init_adapter(struct btmrvl_private *priv)
{
	skb_queue_head_init(&priv->adapter->tx_queue);

	priv->adapter->ps_state = PS_AWAKE;

	init_waitqueue_head(&priv->adapter->cmd_wait_q);
}

/* Drop any queued tx packets and free the adapter structure. */
static void btmrvl_free_adapter(struct btmrvl_private *priv)
{
	skb_queue_purge(&priv->adapter->tx_queue);

	kfree(priv->adapter);

	priv->adapter = NULL;
}

/* No device-specific ioctls are supported. */
static int btmrvl_ioctl(struct hci_dev *hdev,
				unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

/* Nothing to do; resources are released in close/remove paths. */
static void btmrvl_destruct(struct hci_dev *hdev)
{
}

/*
 * hci_dev send callback: account the packet type, append it to the tx
 * queue (tail -- commands queued internally use the head and thus take
 * priority), and wake the main thread to transmit it.
 */
static int btmrvl_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct btmrvl_private *priv = NULL;

	BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);

	if (!hdev || !hdev->driver_data) {
		BT_ERR("Frame for unknown HCI device");
		return -ENODEV;
	}

	priv = (struct btmrvl_private *) hdev->driver_data;
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		/* NOTE(review): "HCI_RUNING" typo in the log text below is
		 * left as-is here (runtime string); fix separately.
		 */
		BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
		print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
						skb->data, skb->len);
		return -EBUSY;
	}

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	}

	skb_queue_tail(&priv->adapter->tx_queue, skb);

	wake_up_interruptible(&priv->main_thread.wait_q);

	return 0;
}

/* hci_dev flush callback: discard everything still queued for tx. */
static int btmrvl_flush(struct hci_dev *hdev)
{
	struct btmrvl_private *priv = hdev->driver_data;

	skb_queue_purge(&priv->adapter->tx_queue);

	return 0;
}

/* hci_dev close callback (continues beyond this view). */
static int btmrvl_close(struct hci_dev *hdev)
{
	struct btmrvl_private *priv = hdev->driver_data;

+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 427 + return 0; 428 + 429 + skb_queue_purge(&priv->adapter->tx_queue); 430 + 431 + return 0; 432 + } 433 + 434 + static int btmrvl_open(struct hci_dev *hdev) 435 + { 436 + set_bit(HCI_RUNNING, &hdev->flags); 437 + 438 + return 0; 439 + } 440 + 441 + /* 442 + * This function handles the event generated by firmware, rx data 443 + * received from firmware, and tx data sent from kernel. 444 + */ 445 + static int btmrvl_service_main_thread(void *data) 446 + { 447 + struct btmrvl_thread *thread = data; 448 + struct btmrvl_private *priv = thread->priv; 449 + struct btmrvl_adapter *adapter = priv->adapter; 450 + wait_queue_t wait; 451 + struct sk_buff *skb; 452 + ulong flags; 453 + 454 + init_waitqueue_entry(&wait, current); 455 + 456 + current->flags |= PF_NOFREEZE; 457 + 458 + for (;;) { 459 + add_wait_queue(&thread->wait_q, &wait); 460 + 461 + set_current_state(TASK_INTERRUPTIBLE); 462 + 463 + if (adapter->wakeup_tries || 464 + ((!adapter->int_count) && 465 + (!priv->btmrvl_dev.tx_dnld_rdy || 466 + skb_queue_empty(&adapter->tx_queue)))) { 467 + BT_DBG("main_thread is sleeping..."); 468 + schedule(); 469 + } 470 + 471 + set_current_state(TASK_RUNNING); 472 + 473 + remove_wait_queue(&thread->wait_q, &wait); 474 + 475 + BT_DBG("main_thread woke up"); 476 + 477 + if (kthread_should_stop()) { 478 + BT_DBG("main_thread: break from main thread"); 479 + break; 480 + } 481 + 482 + spin_lock_irqsave(&priv->driver_lock, flags); 483 + if (adapter->int_count) { 484 + adapter->int_count = 0; 485 + } else if (adapter->ps_state == PS_SLEEP && 486 + !skb_queue_empty(&adapter->tx_queue)) { 487 + spin_unlock_irqrestore(&priv->driver_lock, flags); 488 + adapter->wakeup_tries++; 489 + priv->hw_wakeup_firmware(priv); 490 + continue; 491 + } 492 + spin_unlock_irqrestore(&priv->driver_lock, flags); 493 + 494 + if (adapter->ps_state == PS_SLEEP) 495 + continue; 496 + 497 + if (!priv->btmrvl_dev.tx_dnld_rdy) 498 + continue; 499 + 500 + skb = 
skb_dequeue(&adapter->tx_queue); 501 + if (skb) { 502 + if (btmrvl_tx_pkt(priv, skb)) 503 + priv->btmrvl_dev.hcidev->stat.err_tx++; 504 + else 505 + priv->btmrvl_dev.hcidev->stat.byte_tx += skb->len; 506 + 507 + kfree_skb(skb); 508 + } 509 + } 510 + 511 + return 0; 512 + } 513 + 514 + struct btmrvl_private *btmrvl_add_card(void *card) 515 + { 516 + struct hci_dev *hdev = NULL; 517 + struct btmrvl_private *priv; 518 + int ret; 519 + 520 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 521 + if (!priv) { 522 + BT_ERR("Can not allocate priv"); 523 + goto err_priv; 524 + } 525 + 526 + priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL); 527 + if (!priv->adapter) { 528 + BT_ERR("Allocate buffer for btmrvl_adapter failed!"); 529 + goto err_adapter; 530 + } 531 + 532 + btmrvl_init_adapter(priv); 533 + 534 + hdev = hci_alloc_dev(); 535 + if (!hdev) { 536 + BT_ERR("Can not allocate HCI device"); 537 + goto err_hdev; 538 + } 539 + 540 + BT_DBG("Starting kthread..."); 541 + priv->main_thread.priv = priv; 542 + spin_lock_init(&priv->driver_lock); 543 + 544 + init_waitqueue_head(&priv->main_thread.wait_q); 545 + priv->main_thread.task = kthread_run(btmrvl_service_main_thread, 546 + &priv->main_thread, "btmrvl_main_service"); 547 + 548 + priv->btmrvl_dev.hcidev = hdev; 549 + priv->btmrvl_dev.card = card; 550 + 551 + hdev->driver_data = priv; 552 + 553 + priv->btmrvl_dev.tx_dnld_rdy = true; 554 + 555 + hdev->type = HCI_SDIO; 556 + hdev->open = btmrvl_open; 557 + hdev->close = btmrvl_close; 558 + hdev->flush = btmrvl_flush; 559 + hdev->send = btmrvl_send_frame; 560 + hdev->destruct = btmrvl_destruct; 561 + hdev->ioctl = btmrvl_ioctl; 562 + hdev->owner = THIS_MODULE; 563 + 564 + ret = hci_register_dev(hdev); 565 + if (ret < 0) { 566 + BT_ERR("Can not register HCI device"); 567 + goto err_hci_register_dev; 568 + } 569 + 570 + #ifdef CONFIG_DEBUG_FS 571 + btmrvl_debugfs_init(hdev); 572 + #endif 573 + 574 + return priv; 575 + 576 + err_hci_register_dev: 577 + /* Stop the thread 
servicing the interrupts */ 578 + kthread_stop(priv->main_thread.task); 579 + 580 + hci_free_dev(hdev); 581 + 582 + err_hdev: 583 + btmrvl_free_adapter(priv); 584 + 585 + err_adapter: 586 + kfree(priv); 587 + 588 + err_priv: 589 + return NULL; 590 + } 591 + EXPORT_SYMBOL_GPL(btmrvl_add_card); 592 + 593 + int btmrvl_remove_card(struct btmrvl_private *priv) 594 + { 595 + struct hci_dev *hdev; 596 + 597 + hdev = priv->btmrvl_dev.hcidev; 598 + 599 + wake_up_interruptible(&priv->adapter->cmd_wait_q); 600 + 601 + kthread_stop(priv->main_thread.task); 602 + 603 + #ifdef CONFIG_DEBUG_FS 604 + btmrvl_debugfs_remove(hdev); 605 + #endif 606 + 607 + hci_unregister_dev(hdev); 608 + 609 + hci_free_dev(hdev); 610 + 611 + priv->btmrvl_dev.hcidev = NULL; 612 + 613 + btmrvl_free_adapter(priv); 614 + 615 + kfree(priv); 616 + 617 + return 0; 618 + } 619 + EXPORT_SYMBOL_GPL(btmrvl_remove_card); 620 + 621 + MODULE_AUTHOR("Marvell International Ltd."); 622 + MODULE_DESCRIPTION("Marvell Bluetooth driver ver " VERSION); 623 + MODULE_VERSION(VERSION); 624 + MODULE_LICENSE("GPL v2");
+1003
drivers/bluetooth/btmrvl_sdio.c
/**
 * Marvell BT-over-SDIO driver: SDIO interface related functions.
 *
 * Copyright (C) 2009, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 **/

#include <linux/firmware.h>

#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btmrvl_drv.h"
#include "btmrvl_sdio.h"

#define VERSION "1.0"

/* The btmrvl_sdio_remove() callback function is called
 * when user removes this module from kernel space or ejects
 * the card from the slot. The driver handles these 2 cases
 * differently.
 * If the user is removing the module, a MODULE_SHUTDOWN_REQ
 * command is sent to firmware and interrupt will be disabled.
 * If the card is removed, there is no need to send command
 * or disable interrupt.
 *
 * The variable 'user_rmmod' is used to distinguish these two
 * scenarios. This flag is initialized as FALSE in case the card
 * is removed, and will be set to TRUE for module removal when
 * module_exit function is called.
 */
static u8 user_rmmod;

/* Helper and firmware image names for the SD8688 chip. */
static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = {
	.helper		= "sd8688_helper.bin",
	.firmware	= "sd8688.bin",
};

static const struct sdio_device_id btmrvl_sdio_ids[] = {
	/* Marvell SD8688 Bluetooth device */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
			.driver_data = (unsigned long) &btmrvl_sdio_sd6888 },

	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(sdio, btmrvl_sdio_ids);

/* Read the rx length-unit shift from the card; cached in card->rx_unit. */
static int btmrvl_sdio_get_rx_unit(struct btmrvl_sdio_card *card)
{
	u8 reg;
	int ret;

	reg = sdio_readb(card->func, CARD_RX_UNIT_REG, &ret);
	if (!ret)
		card->rx_unit = reg;

	return ret;
}

/* Read the 16-bit firmware status (two byte registers) into *dat.
 * Returns 0 on success, -EIO on any read failure.
 */
static int btmrvl_sdio_read_fw_status(struct btmrvl_sdio_card *card, u16 *dat)
{
	u8 fws0, fws1;
	int ret;

	*dat = 0;

	fws0 = sdio_readb(card->func, CARD_FW_STATUS0_REG, &ret);

	if (!ret)
		fws1 = sdio_readb(card->func, CARD_FW_STATUS1_REG, &ret);

	if (ret)
		return -EIO;

	*dat = (((u16) fws1) << 8) | fws0;

	return 0;
}

/* Read the pending rx length; the register value is scaled by the
 * rx_unit shift obtained earlier from btmrvl_sdio_get_rx_unit().
 */
static int btmrvl_sdio_read_rx_len(struct btmrvl_sdio_card *card, u16 *dat)
{
	u8 reg;
	int ret;

	reg = sdio_readb(card->func, CARD_RX_LEN_REG, &ret);
	if (!ret)
		*dat = (u16) reg << card->rx_unit;

	return ret;
}

/* Write @mask to the host interrupt mask register to enable interrupts. */
static int btmrvl_sdio_enable_host_int_mask(struct btmrvl_sdio_card *card,
								u8 mask)
{
	int ret;

	sdio_writeb(card->func, mask, HOST_INT_MASK_REG, &ret);
	if (ret) {
		BT_ERR("Unable to enable the host interrupt!");
		ret = -EIO;
	}

	return ret;
}

/* Clear the bits in @mask from the host interrupt mask register
 * (read-modify-write), disabling those interrupt sources.
 */
static int btmrvl_sdio_disable_host_int_mask(struct btmrvl_sdio_card *card,
								u8 mask)
{
	u8 host_int_mask;
	int ret;

	host_int_mask = sdio_readb(card->func, HOST_INT_MASK_REG, &ret);
	if (ret)
		return -EIO;

	host_int_mask &= ~mask;

	sdio_writeb(card->func, host_int_mask, HOST_INT_MASK_REG, &ret);
	if (ret < 0) {
		BT_ERR("Unable to disable the host interrupt!");
		return -EIO;
	}

	return 0;
}

/* Busy-poll the card status register until all @bits are set, with a
 * 1us delay per try.  Returns 0, a read error, or -ETIMEDOUT.
 */
static int btmrvl_sdio_poll_card_status(struct btmrvl_sdio_card *card, u8 bits)
{
	unsigned int tries;
	u8 status;
	int ret;

	for (tries = 0; tries < MAX_POLL_TRIES * 1000; tries++) {
		status = sdio_readb(card->func, CARD_STATUS_REG, &ret);
		if (ret)
			goto failed;
		if ((status & bits) == bits)
			return ret;

		udelay(1);
	}

	ret = -ETIMEDOUT;

failed:
	BT_ERR("FAILED! ret=%d", ret);

	return ret;
}

/* Poll (up to @pollnum tries, 10ms apart) until the firmware reports
 * FIRMWARE_READY.  Returns 0 when ready, -ETIMEDOUT otherwise.
 */
static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card,
								int pollnum)
{
	int ret = -ETIMEDOUT;
	u16 firmwarestat;
	unsigned int tries;

	/* Wait for firmware to become ready */
	for (tries = 0; tries < pollnum; tries++) {
		if (btmrvl_sdio_read_fw_status(card, &firmwarestat) < 0)
			continue;

		if (firmwarestat == FIRMWARE_READY) {
			ret = 0;
			break;
		} else {
			msleep(10);
		}
	}

	return ret;
}

/*
 * Download the helper image to the card in fixed-size blocks.  Each
 * block carries a 4-byte little-endian length header followed by the
 * payload; a zeroed block marks end-of-file.  The transfer buffer is
 * over-allocated and aligned to BTSDIO_DMA_ALIGN.
 */
static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card)
{
	const struct firmware *fw_helper = NULL;
	const u8 *helper = NULL;
	int ret;
	void *tmphlprbuf = NULL;
	int tmphlprbufsz, hlprblknow, helperlen;
	u8 *helperbuf;
	u32 tx_len;

	ret = request_firmware(&fw_helper, card->helper,
						&card->func->dev);
	if ((ret < 0) || !fw_helper) {
		BT_ERR("request_firmware(helper) failed, error code = %d",
									ret);
		ret = -ENOENT;
		goto done;
	}

	helper = fw_helper->data;
	helperlen = fw_helper->size;

	BT_DBG("Downloading helper image (%d bytes), block size %d bytes",
						helperlen, SDIO_BLOCK_SIZE);

	tmphlprbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN);

	tmphlprbuf = kmalloc(tmphlprbufsz, GFP_KERNEL);
	if (!tmphlprbuf) {
		BT_ERR("Unable to allocate buffer for helper."
			" Terminating download");
		ret = -ENOMEM;
		goto done;
	}

	memset(tmphlprbuf, 0, tmphlprbufsz);

	/* DMA-aligned view into the over-allocated buffer. */
	helperbuf = (u8 *) ALIGN_ADDR(tmphlprbuf, BTSDIO_DMA_ALIGN);

	/* Perform helper data transfer */
	tx_len = (FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE)
			- SDIO_HEADER_LEN;
	hlprblknow = 0;

	do {
		/* The card must signal readiness before each block. */
		ret = btmrvl_sdio_poll_card_status(card,
					    CARD_IO_READY | DN_LD_CARD_RDY);
		if (ret < 0) {
			BT_ERR("Helper download poll status timeout @ %d",
								hlprblknow);
			goto done;
		}

		/* Check if there is more data? */
		if (hlprblknow >= helperlen)
			break;

		if (helperlen - hlprblknow < tx_len)
			tx_len = helperlen - hlprblknow;

		/* Little-endian */
		helperbuf[0] = ((tx_len & 0x000000ff) >> 0);
		helperbuf[1] = ((tx_len & 0x0000ff00) >> 8);
		helperbuf[2] = ((tx_len & 0x00ff0000) >> 16);
		helperbuf[3] = ((tx_len & 0xff000000) >> 24);

		memcpy(&helperbuf[SDIO_HEADER_LEN], &helper[hlprblknow],
				tx_len);

		/* Now send the data */
		ret = sdio_writesb(card->func, card->ioport, helperbuf,
				FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE);
		if (ret < 0) {
			BT_ERR("IO error during helper download @ %d",
								hlprblknow);
			goto done;
		}

		hlprblknow += tx_len;
	} while (true);

	BT_DBG("Transferring helper image EOF block");

	/* An all-zero block tells the card the helper is complete. */
	memset(helperbuf, 0x0, SDIO_BLOCK_SIZE);

	ret = sdio_writesb(card->func, card->ioport, helperbuf,
							SDIO_BLOCK_SIZE);
	if (ret < 0) {
		BT_ERR("IO error in writing helper image EOF block");
		goto done;
	}

	ret = 0;

done:
	kfree(tmphlprbuf);
	if (fw_helper)
		release_firmware(fw_helper);

	return ret;
}

/*
 * Download the main firmware image via the (already running) helper.
 * For each chunk the helper advertises the expected length through the
 * BASE0/BASE1 registers; bit 0 of that length signals a CRC error, in
 * which case the previous chunk is resent (up to MAX_WRITE_IOMEM_RETRY
 * times).  A zero length terminates the transfer.
 */
static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
{
	const struct firmware *fw_firmware = NULL;
	const u8 *firmware = NULL;
	int firmwarelen, tmpfwbufsz, ret;
	unsigned int tries, offset;
	u8 base0, base1;
	void *tmpfwbuf = NULL;
	u8 *fwbuf;
	u16 len;
	int txlen = 0, tx_blocks = 0, count = 0;

	ret = request_firmware(&fw_firmware, card->firmware,
						&card->func->dev);
	if ((ret < 0) || !fw_firmware) {
		BT_ERR("request_firmware(firmware) failed, error code = %d",
									ret);
		ret = -ENOENT;
		goto done;
	}

	firmware = fw_firmware->data;
	firmwarelen = fw_firmware->size;

	BT_DBG("Downloading FW image (%d bytes)", firmwarelen);

	tmpfwbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN);
	tmpfwbuf = kmalloc(tmpfwbufsz, GFP_KERNEL);
	if (!tmpfwbuf) {
		BT_ERR("Unable to allocate buffer for firmware."
			" Terminating download");
		ret = -ENOMEM;
		goto done;
	}

	memset(tmpfwbuf, 0, tmpfwbufsz);

	/* Ensure aligned firmware buffer */
	fwbuf = (u8 *) ALIGN_ADDR(tmpfwbuf, BTSDIO_DMA_ALIGN);

	/* Perform firmware data transfer */
	offset = 0;
	do {
		ret = btmrvl_sdio_poll_card_status(card,
					CARD_IO_READY | DN_LD_CARD_RDY);
		if (ret < 0) {
			BT_ERR("FW download with helper poll status"
						" timeout @ %d", offset);
			goto done;
		}

		/* Check if there is more data ? */
		if (offset >= firmwarelen)
			break;

		/* Poll BASE0/BASE1 for the length the helper expects. */
		for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
			base0 = sdio_readb(card->func,
					SQ_READ_BASE_ADDRESS_A0_REG, &ret);
			if (ret) {
				BT_ERR("BASE0 register read failed:"
					" base0 = 0x%04X(%d)."
					" Terminating download",
					base0, base0);
				ret = -EIO;
				goto done;
			}
			base1 = sdio_readb(card->func,
					SQ_READ_BASE_ADDRESS_A1_REG, &ret);
			if (ret) {
				BT_ERR("BASE1 register read failed:"
					" base1 = 0x%04X(%d)."
					" Terminating download",
					base1, base1);
				ret = -EIO;
				goto done;
			}

			len = (((u16) base1) << 8) | base0;
			if (len)
				break;

			udelay(10);
		}

		if (!len)
			break;
		else if (len > BTM_UPLD_SIZE) {
			BT_ERR("FW download failure @%d, invalid length %d",
								offset, len);
			ret = -EINVAL;
			goto done;
		}

		txlen = len;

		/* Bit 0 set means the helper saw a CRC error on the last
		 * chunk: resend it (txlen = 0 keeps the offset unchanged).
		 */
		if (len & BIT(0)) {
			count++;
			if (count > MAX_WRITE_IOMEM_RETRY) {
				BT_ERR("FW download failure @%d, "
					"over max retry count", offset);
				ret = -EIO;
				goto done;
			}
			BT_ERR("FW CRC error indicated by the helper: "
				"len = 0x%04X, txlen = %d", len, txlen);
			len &= ~BIT(0);
			/* Set txlen to 0 so as to resend from same offset */
			txlen = 0;
		} else {
			count = 0;

			/* Last block ? */
			if (firmwarelen - offset < txlen)
				txlen = firmwarelen - offset;

			tx_blocks =
			    (txlen + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE;

			memcpy(fwbuf, &firmware[offset], txlen);
		}

		ret = sdio_writesb(card->func, card->ioport, fwbuf,
					tx_blocks * SDIO_BLOCK_SIZE);

		if (ret < 0) {
			BT_ERR("FW download, writesb(%d) failed @%d",
							count, offset);
			sdio_writeb(card->func, HOST_CMD53_FIN, CONFIG_REG,
									&ret);
			if (ret)
				BT_ERR("writeb failed (CFG)");
		}

		offset += txlen;
	} while (true);

	BT_DBG("FW download over, size %d bytes", offset);

	ret = 0;

done:
	kfree(tmpfwbuf);

	if (fw_firmware)
		release_firmware(fw_firmware);

	return ret;
}

/*
 * Pull one packet from the card: read its length, allocate a
 * DMA-aligned skb, read the raw block data, then dispatch by the
 * transport header type to the HCI core or the vendor event handler.
 */
static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
{
	u16 buf_len = 0;
	int ret, buf_block_len, blksz;
	struct sk_buff *skb = NULL;
	u32 type;
	u8 *payload = NULL;
	struct hci_dev *hdev = priv->btmrvl_dev.hcidev;
	struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;

	if (!card || !card->func) {
		BT_ERR("card or function is NULL!");
		ret = -EINVAL;
		goto exit;
	}

	/* Read the length of data to be transferred */
	ret = btmrvl_sdio_read_rx_len(card, &buf_len);
	if (ret < 0) {
		BT_ERR("read rx_len failed");
		ret = -EIO;
		goto exit;
	}

	blksz = SDIO_BLOCK_SIZE;
	buf_block_len = (buf_len + blksz - 1) / blksz;

	if (buf_len <= SDIO_HEADER_LEN
			|| (buf_block_len * blksz) > ALLOC_BUF_SIZE) {
		BT_ERR("invalid packet length: %d", buf_len);
		ret = -EINVAL;
		goto exit;
	}

	/* Allocate buffer */
	skb = bt_skb_alloc(buf_block_len * blksz + BTSDIO_DMA_ALIGN,
								GFP_ATOMIC);
	if (skb == NULL) {
		BT_ERR("No free skb");
		/* NOTE(review): ret is still 0 here, so this failure is
		 * reported as success and err_rx is not bumped -- confirm
		 * whether -ENOMEM should be set before the goto.
		 */
		goto exit;
	}

	/* Shift skb->data forward to the next DMA alignment boundary. */
	if ((unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)) {
		skb_put(skb, (unsigned long) skb->data &
					(BTSDIO_DMA_ALIGN - 1));
		skb_pull(skb, (unsigned long) skb->data &
					(BTSDIO_DMA_ALIGN - 1));
	}

	payload = skb->data;

	ret = sdio_readsb(card->func, payload, card->ioport,
			  buf_block_len * blksz);
	if (ret < 0) {
		BT_ERR("readsb failed: %d", ret);
		ret = -EIO;
		goto exit;
	}

	/* This is SDIO specific header length: byte[2][1][0], type: byte[3]
	 * (HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor)
	 */

	buf_len = payload[0];
	buf_len |= (u16) payload[1] << 8;
	type = payload[3];

	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
	case HCI_EVENT_PKT:
		bt_cb(skb)->pkt_type = type;
		skb->dev = (void *)hdev;
		skb_put(skb, buf_len);
		skb_pull(skb, SDIO_HEADER_LEN);

		if (type == HCI_EVENT_PKT)
			btmrvl_check_evtpkt(priv, skb);

		hci_recv_frame(skb);
		hdev->stat.byte_rx += buf_len;
		break;

	case MRVL_VENDOR_PKT:
		bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
		skb->dev = (void *)hdev;
		skb_put(skb, buf_len);
		skb_pull(skb, SDIO_HEADER_LEN);

		/* Non-zero means the event was not consumed by the driver;
		 * hand it to the HCI core instead.
		 */
		if (btmrvl_process_event(priv, skb))
			hci_recv_frame(skb);

		hdev->stat.byte_rx += buf_len;
		break;

	default:
		BT_ERR("Unknow packet type:%d", type);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload,
						blksz * buf_block_len);

		kfree_skb(skb);
		skb = NULL;
		break;
	}

exit:
	if (ret) {
		hdev->stat.err_rx++;
		if (skb)
			kfree_skb(skb);
	}

	return ret;
}

/*
 * Read and acknowledge the card interrupt status register; update the
 * tx_dnld_rdy flag on a download-done interrupt and pull rx data on an
 * upload interrupt.  The raw status is returned through *ireg.
 */
static int btmrvl_sdio_get_int_status(struct btmrvl_private *priv, u8 * ireg)
{
	int ret;
	u8 sdio_ireg = 0;
	struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;

	*ireg = 0;

	sdio_ireg = sdio_readb(card->func, HOST_INTSTATUS_REG, &ret);
	if (ret) {
		BT_ERR("sdio_readb: read int status register failed");
		ret = -EIO;
		goto done;
	}

	if (sdio_ireg != 0) {
		/*
		 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
		 * Clear the interrupt status register and re-enable the
		 * interrupt.
		 */
		BT_DBG("sdio_ireg = 0x%x", sdio_ireg);

		sdio_writeb(card->func, ~(sdio_ireg) & (DN_LD_HOST_INT_STATUS |
							UP_LD_HOST_INT_STATUS),
				HOST_INTSTATUS_REG, &ret);
		if (ret) {
			BT_ERR("sdio_writeb: clear int status register "
				"failed");
			ret = -EIO;
			goto done;
		}
	}

	if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
		if (priv->btmrvl_dev.tx_dnld_rdy)
			BT_DBG("tx_done already received: "
				" int_status=0x%x", sdio_ireg);
		else
			priv->btmrvl_dev.tx_dnld_rdy = true;
	}

	if (sdio_ireg & UP_LD_HOST_INT_STATUS)
		btmrvl_sdio_card_to_host(priv);

	*ireg = sdio_ireg;

	ret = 0;

done:
	return ret;
}

/* SDIO interrupt handler: fetch/ack the status, then notify the driver
 * core (which wakes the main service thread).
 */
static void btmrvl_sdio_interrupt(struct sdio_func *func)
{
	struct btmrvl_private *priv;
	struct hci_dev *hcidev;
	struct btmrvl_sdio_card *card;
	u8 ireg = 0;

	card = sdio_get_drvdata(func);
	if (card && card->priv) {
		priv = card->priv;
		hcidev = priv->btmrvl_dev.hcidev;

		if (btmrvl_sdio_get_int_status(priv, &ireg))
			BT_ERR("reading HOST_INT_STATUS_REG failed");
		else
			BT_DBG("HOST_INT_STATUS_REG %#x", ireg);

		btmrvl_interrupt(priv);
	}
}

/*
 * Claim and configure the SDIO function: enable it, claim the IRQ, set
 * the block size, and read the 24-bit I/O port address from the three
 * IO_PORT registers.  On failure, everything acquired so far is
 * released in reverse order via the goto chain.
 */
static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
{
	struct sdio_func *func;
	u8	reg;
	int ret = 0;

	if (!card || !card->func) {
		BT_ERR("Error: card or function is NULL!");
		ret = -EINVAL;
		goto failed;
	}

	func = card->func;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		BT_ERR("sdio_enable_func() failed: ret=%d", ret);
		ret = -EIO;
		goto release_host;
	}

	ret = sdio_claim_irq(func, btmrvl_sdio_interrupt);
	if (ret) {
		BT_ERR("sdio_claim_irq failed: ret=%d", ret);
		ret = -EIO;
		goto disable_func;
	}

	ret = sdio_set_block_size(card->func, SDIO_BLOCK_SIZE);
	if (ret) {
		BT_ERR("cannot set SDIO block size");
		ret = -EIO;
		goto release_irq;
	}

	reg = sdio_readb(func, IO_PORT_0_REG, &ret);
	if (ret < 0) {
		ret = -EIO;
		goto release_irq;
	}

	card->ioport = reg;

	reg = sdio_readb(func, IO_PORT_1_REG, &ret);
	if (ret < 0) {
		ret = -EIO;
		goto release_irq;
	}

	card->ioport |= (reg << 8);

	reg = sdio_readb(func, IO_PORT_2_REG, &ret);
	if (ret < 0) {
		ret = -EIO;
		goto release_irq;
	}

	card->ioport |= (reg << 16);

	BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);

	sdio_set_drvdata(func, card);

	sdio_release_host(func);

	return 0;

release_irq:
	sdio_release_irq(func);

disable_func:
	sdio_disable_func(func);

release_host:
	sdio_release_host(func);

failed:
	return ret;
}

/* Undo btmrvl_sdio_register_dev(): release IRQ, disable the function,
 * and clear the drvdata pointer.  Always returns 0.
 */
static int btmrvl_sdio_unregister_dev(struct btmrvl_sdio_card *card)
{
	if (card && card->func) {
		sdio_claim_host(card->func);
		sdio_release_irq(card->func);
		sdio_disable_func(card->func);
		sdio_release_host(card->func);
		sdio_set_drvdata(card->func, NULL);
	}

	return 0;
}

/* Enable card-to-host interrupts and refresh the cached rx unit. */
static int btmrvl_sdio_enable_host_int(struct btmrvl_sdio_card *card)
{
	int ret;

	if (!card || !card->func)
		return -EINVAL;

	sdio_claim_host(card->func);

	ret = btmrvl_sdio_enable_host_int_mask(card, HIM_ENABLE);

	btmrvl_sdio_get_rx_unit(card);

	sdio_release_host(card->func);

	return ret;
}

/* Disable card-to-host interrupts. */
static int btmrvl_sdio_disable_host_int(struct btmrvl_sdio_card *card)
{
	int ret;

	if (!card || !card->func)
		return -EINVAL;

	sdio_claim_host(card->func);

	ret = btmrvl_sdio_disable_host_int_mask(card, HIM_DISABLE);

	sdio_release_host(card->func);

	return ret;
}

/*
 * hw_host_to_card callback: write @nb bytes of @payload to the card's
 * I/O port in whole SDIO blocks, copying into a DMA-aligned bounce
 * buffer first when @payload is misaligned.  Retries a failed write up
 * to MAX_WRITE_IOMEM_RETRY times.
 */
static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
				u8 *payload, u16 nb)
{
	struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
	int ret = 0;
	int buf_block_len;
	int blksz;
	int i = 0;
	u8 *buf = NULL;
	void *tmpbuf = NULL;
	int tmpbufsz;

	if (!card || !card->func) {
		BT_ERR("card or function is NULL!");
		return -EINVAL;
	}

	buf = payload;
	if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1)) {
		tmpbufsz = ALIGN_SZ(nb, BTSDIO_DMA_ALIGN);
		tmpbuf = kzalloc(tmpbufsz, GFP_KERNEL);
		if (!tmpbuf)
			return -ENOMEM;
		buf = (u8 *) ALIGN_ADDR(tmpbuf, BTSDIO_DMA_ALIGN);
		memcpy(buf, payload, nb);
		/* NOTE(review): tmpbuf is never kfree'd on any path below
		 * -- this looks like a memory leak; confirm and free it
		 * before returning.
		 */
	}

	blksz = SDIO_BLOCK_SIZE;
	buf_block_len = (nb + blksz - 1) / blksz;

	sdio_claim_host(card->func);

	do {
		/* Transfer data to card */
		ret = sdio_writesb(card->func, card->ioport, buf,
				   buf_block_len * blksz);
		if (ret < 0) {
			i++;
			BT_ERR("i=%d writesb failed: %d", i, ret);
			print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
						payload, nb);
			ret = -EIO;
			if (i > MAX_WRITE_IOMEM_RETRY)
				goto exit;
		}
	} while (ret);

	/* Card owns the data now; wait for the download-done interrupt
	 * before sending more.
	 */
	priv->btmrvl_dev.tx_dnld_rdy = false;

exit:
	sdio_release_host(card->func);

	return ret;
}

/*
 * Download helper + firmware unless the firmware is already running,
 * then wait for it to report ready.  Returns 0 or a negative errno.
 */
static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
{
	int ret = 0;

	if (!card || !card->func) {
		BT_ERR("card or function is NULL!");
		return -EINVAL;
	}
	sdio_claim_host(card->func);

	if (!btmrvl_sdio_verify_fw_download(card, 1)) {
		BT_DBG("Firmware already downloaded!");
		goto done;
	}

	ret = btmrvl_sdio_download_helper(card);
	if (ret) {
		BT_ERR("Failed to download helper!");
		ret = -EIO;
		goto done;
	}

	if (btmrvl_sdio_download_fw_w_helper(card)) {
		BT_ERR("Failed to download firmware!");
		ret = -EIO;
		goto done;
	}

	if (btmrvl_sdio_verify_fw_download(card, MAX_POLL_TRIES)) {
		BT_ERR("FW failed to be active in time!");
		ret = -ETIMEDOUT;
		goto done;
	}

done:
	sdio_release_host(card->func);

	return ret;
}

/* hw_wakeup_firmware callback: poke the power-up bit in CONFIG_REG. */
static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv)
{
	struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
	int ret = 0;

	if (!card || !card->func) {
		BT_ERR("card or function is NULL!");
		return -EINVAL;
	}

	sdio_claim_host(card->func);

	sdio_writeb(card->func, HOST_POWER_UP, CONFIG_REG, &ret);

	sdio_release_host(card->func);

	BT_DBG("wake up firmware");

	return ret;
}

/*
 * SDIO probe: allocate the card structure, pick helper/firmware names
 * from the device table, register the SDIO function, download the
 * firmware, enable interrupts, register with the btmrvl core, and
 * finally send the module bring-up command.
 */
static int btmrvl_sdio_probe(struct sdio_func *func,
					const struct sdio_device_id *id)
{
	int ret = 0;
	struct btmrvl_private *priv = NULL;
	struct btmrvl_sdio_card *card = NULL;

	BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
			id->vendor, id->device, id->class, func->num);

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card) {
		ret = -ENOMEM;
		goto done;
	}

	card->func = func;

	if (id->driver_data) {
		struct btmrvl_sdio_device *data = (void *) id->driver_data;
		card->helper = data->helper;
		card->firmware = data->firmware;
	}

	if (btmrvl_sdio_register_dev(card) < 0) {
		BT_ERR("Failed to register BT device!");
		ret = -ENODEV;
		goto free_card;
	}

	/* Disable the interrupts on the card */
	btmrvl_sdio_disable_host_int(card);

	if (btmrvl_sdio_download_fw(card)) {
		BT_ERR("Downloading firmware failed!");
		ret = -ENODEV;
		goto unreg_dev;
	}

	/* Give the firmware a moment to settle before enabling interrupts. */
	msleep(100);

	btmrvl_sdio_enable_host_int(card);

	priv = btmrvl_add_card(card);
	if (!priv) {
		BT_ERR("Initializing card failed!");
		ret = -ENODEV;
		goto disable_host_int;
	}

	card->priv = priv;

	/* Initialize the interface specific function pointers */
	priv->hw_host_to_card = btmrvl_sdio_host_to_card;
	priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw;

	btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);

	return 0;

disable_host_int:
	btmrvl_sdio_disable_host_int(card);
unreg_dev:
	btmrvl_sdio_unregister_dev(card);
free_card:
	kfree(card);
done:
	return ret;
}

/* Remove callback -- continues past the end of this chunk. */
static void btmrvl_sdio_remove(struct sdio_func *func)
{
	struct btmrvl_sdio_card *card;

	if (func) {
		card = sdio_get_drvdata(func);
		if (card) {
			/* Send SHUTDOWN command & disable interrupt
			 * if user removes the module.
955 + */ 956 + if (user_rmmod) { 957 + btmrvl_send_module_cfg_cmd(card->priv, 958 + MODULE_SHUTDOWN_REQ); 959 + btmrvl_sdio_disable_host_int(card); 960 + } 961 + BT_DBG("unregester dev"); 962 + btmrvl_sdio_unregister_dev(card); 963 + btmrvl_remove_card(card->priv); 964 + kfree(card); 965 + } 966 + } 967 + } 968 + 969 + static struct sdio_driver bt_mrvl_sdio = { 970 + .name = "btmrvl_sdio", 971 + .id_table = btmrvl_sdio_ids, 972 + .probe = btmrvl_sdio_probe, 973 + .remove = btmrvl_sdio_remove, 974 + }; 975 + 976 + static int btmrvl_sdio_init_module(void) 977 + { 978 + if (sdio_register_driver(&bt_mrvl_sdio) != 0) { 979 + BT_ERR("SDIO Driver Registration Failed"); 980 + return -ENODEV; 981 + } 982 + 983 + /* Clear the flag in case user removes the card. */ 984 + user_rmmod = 0; 985 + 986 + return 0; 987 + } 988 + 989 + static void btmrvl_sdio_exit_module(void) 990 + { 991 + /* Set the flag as user is removing this module. */ 992 + user_rmmod = 1; 993 + 994 + sdio_unregister_driver(&bt_mrvl_sdio); 995 + } 996 + 997 + module_init(btmrvl_sdio_init_module); 998 + module_exit(btmrvl_sdio_exit_module); 999 + 1000 + MODULE_AUTHOR("Marvell International Ltd."); 1001 + MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION); 1002 + MODULE_VERSION(VERSION); 1003 + MODULE_LICENSE("GPL v2");
+108
drivers/bluetooth/btmrvl_sdio.h
··· 1 + /** 2 + * Marvell BT-over-SDIO driver: SDIO interface related definitions 3 + * 4 + * Copyright (C) 2009, Marvell International Ltd. 5 + * 6 + * This software file (the "File") is distributed by Marvell International 7 + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 8 + * (the "License"). You may use, redistribute and/or modify this File in 9 + * accordance with the terms and conditions of the License, a copy of which 10 + * is available by writing to the Free Software Foundation, Inc., 11 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the 12 + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 13 + * 14 + * 15 + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE 16 + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE 17 + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about 18 + * this warranty disclaimer. 19 + * 20 + **/ 21 + 22 + #define SDIO_HEADER_LEN 4 23 + 24 + /* SD block size can not bigger than 64 due to buf size limit in firmware */ 25 + /* define SD block size for data Tx/Rx */ 26 + #define SDIO_BLOCK_SIZE 64 27 + 28 + /* Number of blocks for firmware transfer */ 29 + #define FIRMWARE_TRANSFER_NBLOCK 2 30 + 31 + /* This is for firmware specific length */ 32 + #define FW_EXTRA_LEN 36 33 + 34 + #define MRVDRV_SIZE_OF_CMD_BUFFER (2 * 1024) 35 + 36 + #define MRVDRV_BT_RX_PACKET_BUFFER_SIZE \ 37 + (HCI_MAX_FRAME_SIZE + FW_EXTRA_LEN) 38 + 39 + #define ALLOC_BUF_SIZE (((max_t (int, MRVDRV_BT_RX_PACKET_BUFFER_SIZE, \ 40 + MRVDRV_SIZE_OF_CMD_BUFFER) + SDIO_HEADER_LEN \ 41 + + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE) \ 42 + * SDIO_BLOCK_SIZE) 43 + 44 + /* The number of times to try when polling for status */ 45 + #define MAX_POLL_TRIES 100 46 + 47 + /* Max retry number of CMD53 write */ 48 + #define MAX_WRITE_IOMEM_RETRY 2 49 + 50 + /* Host Control Registers */ 51 + #define IO_PORT_0_REG 0x00 52 + #define 
IO_PORT_1_REG 0x01 53 + #define IO_PORT_2_REG 0x02 54 + 55 + #define CONFIG_REG 0x03 56 + #define HOST_POWER_UP BIT(1) 57 + #define HOST_CMD53_FIN BIT(2) 58 + 59 + #define HOST_INT_MASK_REG 0x04 60 + #define HIM_DISABLE 0xff 61 + #define HIM_ENABLE (BIT(0) | BIT(1)) 62 + 63 + #define HOST_INTSTATUS_REG 0x05 64 + #define UP_LD_HOST_INT_STATUS BIT(0) 65 + #define DN_LD_HOST_INT_STATUS BIT(1) 66 + 67 + /* Card Control Registers */ 68 + #define SQ_READ_BASE_ADDRESS_A0_REG 0x10 69 + #define SQ_READ_BASE_ADDRESS_A1_REG 0x11 70 + 71 + #define CARD_STATUS_REG 0x20 72 + #define DN_LD_CARD_RDY BIT(0) 73 + #define CARD_IO_READY BIT(3) 74 + 75 + #define CARD_FW_STATUS0_REG 0x40 76 + #define CARD_FW_STATUS1_REG 0x41 77 + #define FIRMWARE_READY 0xfedc 78 + 79 + #define CARD_RX_LEN_REG 0x42 80 + #define CARD_RX_UNIT_REG 0x43 81 + 82 + 83 + struct btmrvl_sdio_card { 84 + struct sdio_func *func; 85 + u32 ioport; 86 + const char *helper; 87 + const char *firmware; 88 + u8 rx_unit; 89 + struct btmrvl_private *priv; 90 + }; 91 + 92 + struct btmrvl_sdio_device { 93 + const char *helper; 94 + const char *firmware; 95 + }; 96 + 97 + 98 + /* Platform specific DMA alignment */ 99 + #define BTSDIO_DMA_ALIGN 8 100 + 101 + /* Macros for Data Alignment : size */ 102 + #define ALIGN_SZ(p, a) \ 103 + (((p) + ((a) - 1)) & ~((a) - 1)) 104 + 105 + /* Macros for Data Alignment : address */ 106 + #define ALIGN_ADDR(p, a) \ 107 + ((((unsigned long)(p)) + (((unsigned long)(a)) - 1)) & \ 108 + ~(((unsigned long)(a)) - 1))
+175 -23
drivers/bluetooth/btusb.c
··· 35 35 #include <net/bluetooth/bluetooth.h> 36 36 #include <net/bluetooth/hci_core.h> 37 37 38 - #define VERSION "0.5" 38 + #define VERSION "0.6" 39 39 40 40 static int ignore_dga; 41 41 static int ignore_csr; ··· 145 145 #define BTUSB_INTR_RUNNING 0 146 146 #define BTUSB_BULK_RUNNING 1 147 147 #define BTUSB_ISOC_RUNNING 2 148 + #define BTUSB_SUSPENDING 3 148 149 149 150 struct btusb_data { 150 151 struct hci_dev *hdev; ··· 158 157 unsigned long flags; 159 158 160 159 struct work_struct work; 160 + struct work_struct waker; 161 161 162 162 struct usb_anchor tx_anchor; 163 163 struct usb_anchor intr_anchor; 164 164 struct usb_anchor bulk_anchor; 165 165 struct usb_anchor isoc_anchor; 166 + struct usb_anchor deferred; 167 + int tx_in_flight; 168 + spinlock_t txlock; 166 169 167 170 struct usb_endpoint_descriptor *intr_ep; 168 171 struct usb_endpoint_descriptor *bulk_tx_ep; ··· 179 174 unsigned int sco_num; 180 175 int isoc_altsetting; 181 176 int suspend_count; 177 + int did_iso_resume:1; 182 178 }; 179 + 180 + static int inc_tx(struct btusb_data *data) 181 + { 182 + unsigned long flags; 183 + int rv; 184 + 185 + spin_lock_irqsave(&data->txlock, flags); 186 + rv = test_bit(BTUSB_SUSPENDING, &data->flags); 187 + if (!rv) 188 + data->tx_in_flight++; 189 + spin_unlock_irqrestore(&data->txlock, flags); 190 + 191 + return rv; 192 + } 183 193 184 194 static void btusb_intr_complete(struct urb *urb) 185 195 { ··· 222 202 if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) 223 203 return; 224 204 205 + usb_mark_last_busy(data->udev); 225 206 usb_anchor_urb(urb, &data->intr_anchor); 226 207 227 208 err = usb_submit_urb(urb, GFP_ATOMIC); ··· 322 301 struct urb *urb; 323 302 unsigned char *buf; 324 303 unsigned int pipe; 325 - int err, size; 304 + int err, size = HCI_MAX_FRAME_SIZE; 326 305 327 306 BT_DBG("%s", hdev->name); 328 307 ··· 332 311 urb = usb_alloc_urb(0, mem_flags); 333 312 if (!urb) 334 313 return -ENOMEM; 335 - 336 - size = 
le16_to_cpu(data->bulk_rx_ep->wMaxPacketSize); 337 314 338 315 buf = kmalloc(size, mem_flags); 339 316 if (!buf) { ··· 346 327 347 328 urb->transfer_flags |= URB_FREE_BUFFER; 348 329 330 + usb_mark_last_busy(data->udev); 349 331 usb_anchor_urb(urb, &data->bulk_anchor); 350 332 351 333 err = usb_submit_urb(urb, mem_flags); ··· 485 465 { 486 466 struct sk_buff *skb = urb->context; 487 467 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 468 + struct btusb_data *data = hdev->driver_data; 469 + 470 + BT_DBG("%s urb %p status %d count %d", hdev->name, 471 + urb, urb->status, urb->actual_length); 472 + 473 + if (!test_bit(HCI_RUNNING, &hdev->flags)) 474 + goto done; 475 + 476 + if (!urb->status) 477 + hdev->stat.byte_tx += urb->transfer_buffer_length; 478 + else 479 + hdev->stat.err_tx++; 480 + 481 + done: 482 + spin_lock(&data->txlock); 483 + data->tx_in_flight--; 484 + spin_unlock(&data->txlock); 485 + 486 + kfree(urb->setup_packet); 487 + 488 + kfree_skb(skb); 489 + } 490 + 491 + static void btusb_isoc_tx_complete(struct urb *urb) 492 + { 493 + struct sk_buff *skb = urb->context; 494 + struct hci_dev *hdev = (struct hci_dev *) skb->dev; 488 495 489 496 BT_DBG("%s urb %p status %d count %d", hdev->name, 490 497 urb, urb->status, urb->actual_length); ··· 537 490 538 491 BT_DBG("%s", hdev->name); 539 492 493 + err = usb_autopm_get_interface(data->intf); 494 + if (err < 0) 495 + return err; 496 + 497 + data->intf->needs_remote_wakeup = 1; 498 + 540 499 if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 541 - return 0; 500 + goto done; 542 501 543 502 if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) 544 - return 0; 503 + goto done; 545 504 546 505 err = btusb_submit_intr_urb(hdev, GFP_KERNEL); 547 506 if (err < 0) ··· 562 509 set_bit(BTUSB_BULK_RUNNING, &data->flags); 563 510 btusb_submit_bulk_urb(hdev, GFP_KERNEL); 564 511 512 + done: 513 + usb_autopm_put_interface(data->intf); 565 514 return 0; 566 515 567 516 failed: 568 517 clear_bit(BTUSB_INTR_RUNNING, 
&data->flags); 569 518 clear_bit(HCI_RUNNING, &hdev->flags); 519 + usb_autopm_put_interface(data->intf); 570 520 return err; 521 + } 522 + 523 + static void btusb_stop_traffic(struct btusb_data *data) 524 + { 525 + usb_kill_anchored_urbs(&data->intr_anchor); 526 + usb_kill_anchored_urbs(&data->bulk_anchor); 527 + usb_kill_anchored_urbs(&data->isoc_anchor); 571 528 } 572 529 573 530 static int btusb_close(struct hci_dev *hdev) 574 531 { 575 532 struct btusb_data *data = hdev->driver_data; 533 + int err; 576 534 577 535 BT_DBG("%s", hdev->name); 578 536 ··· 593 529 cancel_work_sync(&data->work); 594 530 595 531 clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 596 - usb_kill_anchored_urbs(&data->isoc_anchor); 597 - 598 532 clear_bit(BTUSB_BULK_RUNNING, &data->flags); 599 - usb_kill_anchored_urbs(&data->bulk_anchor); 600 - 601 533 clear_bit(BTUSB_INTR_RUNNING, &data->flags); 602 - usb_kill_anchored_urbs(&data->intr_anchor); 534 + 535 + btusb_stop_traffic(data); 536 + err = usb_autopm_get_interface(data->intf); 537 + if (err < 0) 538 + return 0; 539 + 540 + data->intf->needs_remote_wakeup = 0; 541 + usb_autopm_put_interface(data->intf); 603 542 604 543 return 0; 605 544 } ··· 689 622 urb->dev = data->udev; 690 623 urb->pipe = pipe; 691 624 urb->context = skb; 692 - urb->complete = btusb_tx_complete; 625 + urb->complete = btusb_isoc_tx_complete; 693 626 urb->interval = data->isoc_tx_ep->bInterval; 694 627 695 628 urb->transfer_flags = URB_ISO_ASAP; ··· 700 633 le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); 701 634 702 635 hdev->stat.sco_tx++; 703 - break; 636 + goto skip_waking; 704 637 705 638 default: 706 639 return -EILSEQ; 707 640 } 708 641 642 + err = inc_tx(data); 643 + if (err) { 644 + usb_anchor_urb(urb, &data->deferred); 645 + schedule_work(&data->waker); 646 + err = 0; 647 + goto done; 648 + } 649 + 650 + skip_waking: 709 651 usb_anchor_urb(urb, &data->tx_anchor); 710 652 711 653 err = usb_submit_urb(urb, GFP_ATOMIC); ··· 722 646 BT_ERR("%s urb %p submission 
failed", hdev->name, urb); 723 647 kfree(urb->setup_packet); 724 648 usb_unanchor_urb(urb); 649 + } else { 650 + usb_mark_last_busy(data->udev); 725 651 } 726 652 727 653 usb_free_urb(urb); 728 654 655 + done: 729 656 return err; 730 657 } 731 658 ··· 800 721 { 801 722 struct btusb_data *data = container_of(work, struct btusb_data, work); 802 723 struct hci_dev *hdev = data->hdev; 724 + int err; 803 725 804 726 if (hdev->conn_hash.sco_num > 0) { 727 + if (!data->did_iso_resume) { 728 + err = usb_autopm_get_interface(data->isoc); 729 + if (err < 0) { 730 + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 731 + usb_kill_anchored_urbs(&data->isoc_anchor); 732 + return; 733 + } 734 + 735 + data->did_iso_resume = 1; 736 + } 805 737 if (data->isoc_altsetting != 2) { 806 738 clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 807 739 usb_kill_anchored_urbs(&data->isoc_anchor); ··· 832 742 usb_kill_anchored_urbs(&data->isoc_anchor); 833 743 834 744 __set_isoc_interface(hdev, 0); 745 + if (data->did_iso_resume) { 746 + data->did_iso_resume = 0; 747 + usb_autopm_put_interface(data->isoc); 748 + } 835 749 } 750 + } 751 + 752 + static void btusb_waker(struct work_struct *work) 753 + { 754 + struct btusb_data *data = container_of(work, struct btusb_data, waker); 755 + int err; 756 + 757 + err = usb_autopm_get_interface(data->intf); 758 + if (err < 0) 759 + return; 760 + 761 + usb_autopm_put_interface(data->intf); 836 762 } 837 763 838 764 static int btusb_probe(struct usb_interface *intf, ··· 920 814 spin_lock_init(&data->lock); 921 815 922 816 INIT_WORK(&data->work, btusb_work); 817 + INIT_WORK(&data->waker, btusb_waker); 818 + spin_lock_init(&data->txlock); 923 819 924 820 init_usb_anchor(&data->tx_anchor); 925 821 init_usb_anchor(&data->intr_anchor); 926 822 init_usb_anchor(&data->bulk_anchor); 927 823 init_usb_anchor(&data->isoc_anchor); 824 + init_usb_anchor(&data->deferred); 928 825 929 826 hdev = hci_alloc_dev(); 930 827 if (!hdev) { ··· 1052 943 hci_free_dev(hdev); 1053 944 } 1054 
945 946 + #ifdef CONFIG_PM 1055 947 static int btusb_suspend(struct usb_interface *intf, pm_message_t message) 1056 948 { 1057 949 struct btusb_data *data = usb_get_intfdata(intf); ··· 1062 952 if (data->suspend_count++) 1063 953 return 0; 1064 954 955 + spin_lock_irq(&data->txlock); 956 + if (!(interface_to_usbdev(intf)->auto_pm && data->tx_in_flight)) { 957 + set_bit(BTUSB_SUSPENDING, &data->flags); 958 + spin_unlock_irq(&data->txlock); 959 + } else { 960 + spin_unlock_irq(&data->txlock); 961 + data->suspend_count--; 962 + return -EBUSY; 963 + } 964 + 1065 965 cancel_work_sync(&data->work); 1066 966 967 + btusb_stop_traffic(data); 1067 968 usb_kill_anchored_urbs(&data->tx_anchor); 1068 969 1069 - usb_kill_anchored_urbs(&data->isoc_anchor); 1070 - usb_kill_anchored_urbs(&data->bulk_anchor); 1071 - usb_kill_anchored_urbs(&data->intr_anchor); 1072 - 1073 970 return 0; 971 + } 972 + 973 + static void play_deferred(struct btusb_data *data) 974 + { 975 + struct urb *urb; 976 + int err; 977 + 978 + while ((urb = usb_get_from_anchor(&data->deferred))) { 979 + err = usb_submit_urb(urb, GFP_ATOMIC); 980 + if (err < 0) 981 + break; 982 + 983 + data->tx_in_flight++; 984 + } 985 + usb_scuttle_anchored_urbs(&data->deferred); 1074 986 } 1075 987 1076 988 static int btusb_resume(struct usb_interface *intf) 1077 989 { 1078 990 struct btusb_data *data = usb_get_intfdata(intf); 1079 991 struct hci_dev *hdev = data->hdev; 1080 - int err; 992 + int err = 0; 1081 993 1082 994 BT_DBG("intf %p", intf); 1083 995 ··· 1107 975 return 0; 1108 976 1109 977 if (!test_bit(HCI_RUNNING, &hdev->flags)) 1110 - return 0; 978 + goto done; 1111 979 1112 980 if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { 1113 981 err = btusb_submit_intr_urb(hdev, GFP_NOIO); 1114 982 if (err < 0) { 1115 983 clear_bit(BTUSB_INTR_RUNNING, &data->flags); 1116 - return err; 984 + goto failed; 1117 985 } 1118 986 } 1119 987 ··· 1121 989 err = btusb_submit_bulk_urb(hdev, GFP_NOIO); 1122 990 if (err < 0) { 1123 991 
clear_bit(BTUSB_BULK_RUNNING, &data->flags); 1124 - return err; 1125 - } else 1126 - btusb_submit_bulk_urb(hdev, GFP_NOIO); 992 + goto failed; 993 + } 994 + 995 + btusb_submit_bulk_urb(hdev, GFP_NOIO); 1127 996 } 1128 997 1129 998 if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { ··· 1134 1001 btusb_submit_isoc_urb(hdev, GFP_NOIO); 1135 1002 } 1136 1003 1004 + spin_lock_irq(&data->txlock); 1005 + play_deferred(data); 1006 + clear_bit(BTUSB_SUSPENDING, &data->flags); 1007 + spin_unlock_irq(&data->txlock); 1008 + schedule_work(&data->work); 1009 + 1137 1010 return 0; 1011 + 1012 + failed: 1013 + usb_scuttle_anchored_urbs(&data->deferred); 1014 + done: 1015 + spin_lock_irq(&data->txlock); 1016 + clear_bit(BTUSB_SUSPENDING, &data->flags); 1017 + spin_unlock_irq(&data->txlock); 1018 + 1019 + return err; 1138 1020 } 1021 + #endif 1139 1022 1140 1023 static struct usb_driver btusb_driver = { 1141 1024 .name = "btusb", 1142 1025 .probe = btusb_probe, 1143 1026 .disconnect = btusb_disconnect, 1027 + #ifdef CONFIG_PM 1144 1028 .suspend = btusb_suspend, 1145 1029 .resume = btusb_resume, 1030 + #endif 1146 1031 .id_table = btusb_table, 1032 + .supports_autosuspend = 1, 1147 1033 }; 1148 1034 1149 1035 static int __init btusb_init(void)
+2 -1
drivers/bluetooth/hci_bcsp.c
··· 373 373 374 374 i = 0; 375 375 skb_queue_walk_safe(&bcsp->unack, skb, tmp) { 376 - if (i++ >= pkts_to_be_removed) 376 + if (i >= pkts_to_be_removed) 377 377 break; 378 + i++; 378 379 379 380 __skb_unlink(skb, &bcsp->unack); 380 381 kfree_skb(skb);
+4 -1
include/net/bluetooth/bluetooth.h
··· 138 138 struct bt_skb_cb { 139 139 __u8 pkt_type; 140 140 __u8 incoming; 141 + __u8 tx_seq; 142 + __u8 retries; 143 + __u8 sar; 141 144 }; 142 - #define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb)) 145 + #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) 143 146 144 147 static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) 145 148 {
+7 -3
include/net/bluetooth/hci_core.h
··· 117 117 struct sk_buff *sent_cmd; 118 118 struct sk_buff *reassembly[3]; 119 119 120 - struct semaphore req_lock; 120 + struct mutex req_lock; 121 121 wait_queue_head_t req_wait_q; 122 122 __u32 req_status; 123 123 __u32 req_result; ··· 187 187 struct work_struct work_del; 188 188 189 189 struct device dev; 190 + atomic_t devref; 190 191 191 192 struct hci_dev *hdev; 192 193 void *l2cap_data; ··· 339 338 340 339 void hci_conn_enter_active_mode(struct hci_conn *conn); 341 340 void hci_conn_enter_sniff_mode(struct hci_conn *conn); 341 + 342 + void hci_conn_hold_device(struct hci_conn *conn); 343 + void hci_conn_put_device(struct hci_conn *conn); 342 344 343 345 static inline void hci_conn_hold(struct hci_conn *conn) 344 346 { ··· 704 700 #define HCI_REQ_PEND 1 705 701 #define HCI_REQ_CANCELED 2 706 702 707 - #define hci_req_lock(d) down(&d->req_lock) 708 - #define hci_req_unlock(d) up(&d->req_lock) 703 + #define hci_req_lock(d) mutex_lock(&d->req_lock) 704 + #define hci_req_unlock(d) mutex_unlock(&d->req_lock) 709 705 710 706 void hci_req_complete(struct hci_dev *hdev, int result); 711 707
+118 -14
include/net/bluetooth/l2cap.h
··· 27 27 28 28 /* L2CAP defaults */ 29 29 #define L2CAP_DEFAULT_MTU 672 30 + #define L2CAP_DEFAULT_MIN_MTU 48 30 31 #define L2CAP_DEFAULT_FLUSH_TO 0xffff 31 - #define L2CAP_DEFAULT_RX_WINDOW 1 32 - #define L2CAP_DEFAULT_MAX_RECEIVE 1 33 - #define L2CAP_DEFAULT_RETRANS_TO 300 /* 300 milliseconds */ 34 - #define L2CAP_DEFAULT_MONITOR_TO 1000 /* 1 second */ 35 - #define L2CAP_DEFAULT_MAX_RX_APDU 0xfff7 32 + #define L2CAP_DEFAULT_TX_WINDOW 63 33 + #define L2CAP_DEFAULT_NUM_TO_ACK (L2CAP_DEFAULT_TX_WINDOW/5) 34 + #define L2CAP_DEFAULT_MAX_TX 3 35 + #define L2CAP_DEFAULT_RETRANS_TO 1000 /* 1 second */ 36 + #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ 37 + #define L2CAP_DEFAULT_MAX_PDU_SIZE 672 36 38 37 39 #define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ 38 40 #define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */ ··· 54 52 __u16 imtu; 55 53 __u16 flush_to; 56 54 __u8 mode; 55 + __u8 fcs; 57 56 }; 58 57 59 58 #define L2CAP_CONNINFO 0x02 ··· 95 92 /* L2CAP checksum option */ 96 93 #define L2CAP_FCS_NONE 0x00 97 94 #define L2CAP_FCS_CRC16 0x01 95 + 96 + /* L2CAP Control Field bit masks */ 97 + #define L2CAP_CTRL_SAR 0xC000 98 + #define L2CAP_CTRL_REQSEQ 0x3F00 99 + #define L2CAP_CTRL_TXSEQ 0x007E 100 + #define L2CAP_CTRL_RETRANS 0x0080 101 + #define L2CAP_CTRL_FINAL 0x0080 102 + #define L2CAP_CTRL_POLL 0x0010 103 + #define L2CAP_CTRL_SUPERVISE 0x000C 104 + #define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ 105 + 106 + #define L2CAP_CTRL_TXSEQ_SHIFT 1 107 + #define L2CAP_CTRL_REQSEQ_SHIFT 8 108 + #define L2CAP_CTRL_SAR_SHIFT 14 109 + 110 + /* L2CAP Supervisory Function */ 111 + #define L2CAP_SUPER_RCV_READY 0x0000 112 + #define L2CAP_SUPER_REJECT 0x0004 113 + #define L2CAP_SUPER_RCV_NOT_READY 0x0008 114 + #define L2CAP_SUPER_SELECT_REJECT 0x000C 115 + 116 + /* L2CAP Segmentation and Reassembly */ 117 + #define L2CAP_SDU_UNSEGMENTED 0x0000 118 + #define L2CAP_SDU_START 0x4000 119 + #define L2CAP_SDU_END 0x8000 120 + #define L2CAP_SDU_CONTINUE 0xC000 98 121 99 
122 /* L2CAP structures */ 100 123 struct l2cap_hdr { ··· 219 190 #define L2CAP_MODE_RETRANS 0x01 220 191 #define L2CAP_MODE_FLOWCTL 0x02 221 192 #define L2CAP_MODE_ERTM 0x03 222 - #define L2CAP_MODE_STREAM 0x04 193 + #define L2CAP_MODE_STREAMING 0x04 223 194 224 195 struct l2cap_disconn_req { 225 196 __le16 dcid; ··· 290 261 291 262 /* ----- L2CAP channel and socket info ----- */ 292 263 #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) 264 + #define TX_QUEUE(sk) (&l2cap_pi(sk)->tx_queue) 265 + #define SREJ_QUEUE(sk) (&l2cap_pi(sk)->srej_queue) 266 + #define SREJ_LIST(sk) (&l2cap_pi(sk)->srej_l.list) 267 + 268 + struct srej_list { 269 + __u8 tx_seq; 270 + struct list_head list; 271 + }; 293 272 294 273 struct l2cap_pinfo { 295 274 struct bt_sock bt; ··· 308 271 __u16 imtu; 309 272 __u16 omtu; 310 273 __u16 flush_to; 311 - __u8 sec_level; 274 + __u8 mode; 275 + __u8 num_conf_req; 276 + __u8 num_conf_rsp; 277 + 278 + __u8 fcs; 279 + __u8 sec_level; 312 280 __u8 role_switch; 313 - __u8 force_reliable; 281 + __u8 force_reliable; 314 282 315 283 __u8 conf_req[64]; 316 284 __u8 conf_len; 317 285 __u8 conf_state; 318 - __u8 conf_retry; 286 + __u8 conn_state; 287 + 288 + __u8 next_tx_seq; 289 + __u8 expected_ack_seq; 290 + __u8 req_seq; 291 + __u8 expected_tx_seq; 292 + __u8 buffer_seq; 293 + __u8 buffer_seq_srej; 294 + __u8 srej_save_reqseq; 295 + __u8 unacked_frames; 296 + __u8 retry_count; 297 + __u8 num_to_ack; 298 + __u16 sdu_len; 299 + __u16 partial_sdu_len; 300 + struct sk_buff *sdu; 319 301 320 302 __u8 ident; 321 303 304 + __u8 remote_tx_win; 305 + __u8 remote_max_tx; 306 + __u16 retrans_timeout; 307 + __u16 monitor_timeout; 308 + __u16 max_pdu_size; 309 + 322 310 __le16 sport; 323 311 312 + struct timer_list retrans_timer; 313 + struct timer_list monitor_timer; 314 + struct sk_buff_head tx_queue; 315 + struct sk_buff_head srej_queue; 316 + struct srej_list srej_l; 324 317 struct l2cap_conn *conn; 325 318 struct sock *next_c; 326 319 struct sock *prev_c; 327 320 
}; 328 321 329 - #define L2CAP_CONF_REQ_SENT 0x01 330 - #define L2CAP_CONF_INPUT_DONE 0x02 331 - #define L2CAP_CONF_OUTPUT_DONE 0x04 332 - #define L2CAP_CONF_CONNECT_PEND 0x80 322 + #define L2CAP_CONF_REQ_SENT 0x01 323 + #define L2CAP_CONF_INPUT_DONE 0x02 324 + #define L2CAP_CONF_OUTPUT_DONE 0x04 325 + #define L2CAP_CONF_MTU_DONE 0x08 326 + #define L2CAP_CONF_MODE_DONE 0x10 327 + #define L2CAP_CONF_CONNECT_PEND 0x20 328 + #define L2CAP_CONF_NO_FCS_RECV 0x40 329 + #define L2CAP_CONF_STATE2_DEVICE 0x80 333 330 334 - #define L2CAP_CONF_MAX_RETRIES 2 331 + #define L2CAP_CONF_MAX_CONF_REQ 2 332 + #define L2CAP_CONF_MAX_CONF_RSP 2 333 + 334 + #define L2CAP_CONN_SAR_SDU 0x01 335 + #define L2CAP_CONN_SREJ_SENT 0x02 336 + #define L2CAP_CONN_WAIT_F 0x04 337 + #define L2CAP_CONN_SREJ_ACT 0x08 338 + #define L2CAP_CONN_SEND_PBIT 0x10 339 + #define L2CAP_CONN_REMOTE_BUSY 0x20 340 + #define L2CAP_CONN_LOCAL_BUSY 0x40 341 + 342 + #define __mod_retrans_timer() mod_timer(&l2cap_pi(sk)->retrans_timer, \ 343 + jiffies + msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO)); 344 + #define __mod_monitor_timer() mod_timer(&l2cap_pi(sk)->monitor_timer, \ 345 + jiffies + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO)); 346 + 347 + static inline int l2cap_tx_window_full(struct sock *sk) 348 + { 349 + struct l2cap_pinfo *pi = l2cap_pi(sk); 350 + int sub; 351 + 352 + sub = (pi->next_tx_seq - pi->expected_ack_seq) % 64; 353 + 354 + if (sub < 0) 355 + sub += 64; 356 + 357 + return (sub == pi->remote_tx_win); 358 + } 359 + 360 + #define __get_txseq(ctrl) ((ctrl) & L2CAP_CTRL_TXSEQ) >> 1 361 + #define __get_reqseq(ctrl) ((ctrl) & L2CAP_CTRL_REQSEQ) >> 8 362 + #define __is_iframe(ctrl) !((ctrl) & L2CAP_CTRL_FRAME_TYPE) 363 + #define __is_sframe(ctrl) (ctrl) & L2CAP_CTRL_FRAME_TYPE 364 + #define __is_sar_start(ctrl) ((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START 335 365 336 366 void l2cap_load(void); 337 367
+2
include/net/bluetooth/rfcomm.h
··· 29 29 #define RFCOMM_CONN_TIMEOUT (HZ * 30) 30 30 #define RFCOMM_DISC_TIMEOUT (HZ * 20) 31 31 #define RFCOMM_AUTH_TIMEOUT (HZ * 25) 32 + #define RFCOMM_IDLE_TIMEOUT (HZ * 2) 32 33 33 34 #define RFCOMM_DEFAULT_MTU 127 34 35 #define RFCOMM_DEFAULT_CREDITS 7 ··· 155 154 struct rfcomm_session { 156 155 struct list_head list; 157 156 struct socket *sock; 157 + struct timer_list timer; 158 158 unsigned long state; 159 159 unsigned long flags; 160 160 atomic_t refcnt;
+1
net/bluetooth/Kconfig
··· 34 34 config BT_L2CAP 35 35 tristate "L2CAP protocol support" 36 36 depends on BT 37 + select CRC16 37 38 help 38 39 L2CAP (Logical Link Control and Adaptation Protocol) provides 39 40 connection oriented and connection-less data transport. L2CAP
+16 -1
net/bluetooth/hci_conn.c
··· 246 246 if (hdev->notify) 247 247 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); 248 248 249 + atomic_set(&conn->devref, 0); 250 + 249 251 hci_conn_init_sysfs(conn); 250 252 251 253 tasklet_enable(&hdev->tx_task); ··· 290 288 291 289 skb_queue_purge(&conn->data_q); 292 290 293 - hci_conn_del_sysfs(conn); 291 + hci_conn_put_device(conn); 294 292 295 293 hci_dev_put(hdev); 296 294 ··· 584 582 585 583 hci_dev_unlock(hdev); 586 584 } 585 + 586 + void hci_conn_hold_device(struct hci_conn *conn) 587 + { 588 + atomic_inc(&conn->devref); 589 + } 590 + EXPORT_SYMBOL(hci_conn_hold_device); 591 + 592 + void hci_conn_put_device(struct hci_conn *conn) 593 + { 594 + if (atomic_dec_and_test(&conn->devref)) 595 + hci_conn_del_sysfs(conn); 596 + } 597 + EXPORT_SYMBOL(hci_conn_put_device); 587 598 588 599 int hci_get_conn_list(void __user *arg) 589 600 {
+1 -1
net/bluetooth/hci_core.c
··· 911 911 hdev->reassembly[i] = NULL; 912 912 913 913 init_waitqueue_head(&hdev->req_wait_q); 914 - init_MUTEX(&hdev->req_lock); 914 + mutex_init(&hdev->req_lock); 915 915 916 916 inquiry_cache_init(hdev); 917 917
+2
net/bluetooth/hci_event.c
··· 887 887 } else 888 888 conn->state = BT_CONNECTED; 889 889 890 + hci_conn_hold_device(conn); 890 891 hci_conn_add_sysfs(conn); 891 892 892 893 if (test_bit(HCI_AUTH, &hdev->flags)) ··· 1694 1693 conn->handle = __le16_to_cpu(ev->handle); 1695 1694 conn->state = BT_CONNECTED; 1696 1695 1696 + hci_conn_hold_device(conn); 1697 1697 hci_conn_add_sysfs(conn); 1698 1698 break; 1699 1699
+45 -21
net/bluetooth/hidp/core.c
··· 40 40 41 41 #include <linux/input.h> 42 42 #include <linux/hid.h> 43 + #include <linux/hidraw.h> 43 44 44 45 #include <net/bluetooth/bluetooth.h> 45 46 #include <net/bluetooth/hci_core.h> ··· 93 92 { 94 93 __module_get(THIS_MODULE); 95 94 list_add(&session->list, &hidp_session_list); 95 + 96 + hci_conn_hold_device(session->conn); 96 97 } 97 98 98 99 static void __hidp_unlink_session(struct hidp_session *session) 99 100 { 101 + hci_conn_put_device(session->conn); 102 + 100 103 list_del(&session->list); 101 104 module_put(THIS_MODULE); 102 105 } ··· 379 374 380 375 /* Kill session thread */ 381 376 atomic_inc(&session->terminate); 377 + hidp_schedule(session); 382 378 } 383 379 } 384 380 ··· 579 573 if (session->hid) { 580 574 if (session->hid->claimed & HID_CLAIMED_INPUT) 581 575 hidinput_disconnect(session->hid); 576 + if (session->hid->claimed & HID_CLAIMED_HIDRAW) 577 + hidraw_disconnect(session->hid); 578 + 582 579 hid_destroy_device(session->hid); 580 + session->hid = NULL; 583 581 } 584 582 585 583 /* Wakeup user-space polling for socket errors */ ··· 611 601 { 612 602 bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; 613 603 bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; 604 + struct device *device = NULL; 614 605 struct hci_dev *hdev; 615 - struct hci_conn *conn; 616 606 617 607 hdev = hci_get_route(dst, src); 618 608 if (!hdev) 619 609 return NULL; 620 610 621 - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 611 + session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 612 + if (session->conn) 613 + device = &session->conn->dev; 622 614 623 615 hci_dev_put(hdev); 624 616 625 - return conn ? 
&conn->dev : NULL; 617 + return device; 626 618 } 627 619 628 620 static int hidp_setup_input(struct hidp_session *session, 629 621 struct hidp_connadd_req *req) 630 622 { 631 623 struct input_dev *input; 632 - int i; 624 + int err, i; 633 625 634 626 input = input_allocate_device(); 635 627 if (!input) ··· 678 666 679 667 input->event = hidp_input_event; 680 668 681 - return input_register_device(input); 669 + err = input_register_device(input); 670 + if (err < 0) { 671 + hci_conn_put_device(session->conn); 672 + return err; 673 + } 674 + 675 + return 0; 682 676 } 683 677 684 678 static int hidp_open(struct hid_device *hid) ··· 766 748 { 767 749 struct hid_device *hid; 768 750 bdaddr_t src, dst; 769 - int ret; 751 + int err; 770 752 771 753 hid = hid_allocate_device(); 772 - if (IS_ERR(hid)) { 773 - ret = PTR_ERR(session->hid); 774 - goto err; 775 - } 754 + if (IS_ERR(hid)) 755 + return PTR_ERR(session->hid); 776 756 777 757 session->hid = hid; 778 758 session->req = req; ··· 792 776 hid->dev.parent = hidp_get_device(session); 793 777 hid->ll_driver = &hidp_hid_driver; 794 778 795 - ret = hid_add_device(hid); 796 - if (ret) 797 - goto err_hid; 779 + err = hid_add_device(hid); 780 + if (err < 0) 781 + goto failed; 798 782 799 783 return 0; 800 - err_hid: 784 + 785 + failed: 801 786 hid_destroy_device(hid); 802 787 session->hid = NULL; 803 - err: 804 - return ret; 788 + 789 + return err; 805 790 } 806 791 807 792 int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) ··· 852 835 if (req->rd_size > 0) { 853 836 err = hidp_setup_hid(session, req); 854 837 if (err && err != -ENODEV) 855 - goto err_skb; 838 + goto purge; 856 839 } 857 840 858 841 if (!session->hid) { 859 842 err = hidp_setup_input(session, req); 860 843 if (err < 0) 861 - goto err_skb; 844 + goto purge; 862 845 } 863 846 864 847 __hidp_link_session(session); ··· 886 869 887 870 __hidp_unlink_session(session); 888 871 889 - if (session->input) 872 + if 
(session->input) { 890 873 input_unregister_device(session->input); 891 - if (session->hid) 874 + session->input = NULL; 875 + } 876 + 877 + if (session->hid) { 892 878 hid_destroy_device(session->hid); 893 - err_skb: 879 + session->hid = NULL; 880 + } 881 + 882 + purge: 894 883 skb_queue_purge(&session->ctrl_transmit); 895 884 skb_queue_purge(&session->intr_transmit); 885 + 896 886 failed: 897 887 up_write(&hidp_session_sem); 898 888
+2
net/bluetooth/hidp/hidp.h
··· 126 126 struct hidp_session { 127 127 struct list_head list; 128 128 129 + struct hci_conn *conn; 130 + 129 131 struct socket *ctrl_sock; 130 132 struct socket *intr_sock; 131 133
+1267 -100
net/bluetooth/l2cap.c
··· 41 41 #include <linux/list.h> 42 42 #include <linux/device.h> 43 43 #include <linux/uaccess.h> 44 + #include <linux/crc16.h> 44 45 #include <net/sock.h> 45 46 46 47 #include <asm/system.h> ··· 51 50 #include <net/bluetooth/hci_core.h> 52 51 #include <net/bluetooth/l2cap.h> 53 52 54 - #define VERSION "2.13" 53 + #define VERSION "2.14" 54 + 55 + static int enable_ertm = 0; 55 56 56 57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 57 58 static u8 l2cap_fixed_chan[8] = { 0x02, }; ··· 334 331 return hci_send_acl(conn->hcon, skb, 0); 335 332 } 336 333 334 + static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) 335 + { 336 + struct sk_buff *skb; 337 + struct l2cap_hdr *lh; 338 + struct l2cap_conn *conn = pi->conn; 339 + int count, hlen = L2CAP_HDR_SIZE + 2; 340 + 341 + if (pi->fcs == L2CAP_FCS_CRC16) 342 + hlen += 2; 343 + 344 + BT_DBG("pi %p, control 0x%2.2x", pi, control); 345 + 346 + count = min_t(unsigned int, conn->mtu, hlen); 347 + control |= L2CAP_CTRL_FRAME_TYPE; 348 + 349 + skb = bt_skb_alloc(count, GFP_ATOMIC); 350 + if (!skb) 351 + return -ENOMEM; 352 + 353 + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 354 + lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 355 + lh->cid = cpu_to_le16(pi->dcid); 356 + put_unaligned_le16(control, skb_put(skb, 2)); 357 + 358 + if (pi->fcs == L2CAP_FCS_CRC16) { 359 + u16 fcs = crc16(0, (u8 *)lh, count - 2); 360 + put_unaligned_le16(fcs, skb_put(skb, 2)); 361 + } 362 + 363 + return hci_send_acl(pi->conn->hcon, skb, 0); 364 + } 365 + 366 + static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) 367 + { 368 + if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) 369 + control |= L2CAP_SUPER_RCV_NOT_READY; 370 + else 371 + control |= L2CAP_SUPER_RCV_READY; 372 + 373 + return l2cap_send_sframe(pi, control); 374 + } 375 + 337 376 static void l2cap_do_start(struct sock *sk) 338 377 { 339 378 struct l2cap_conn *conn = l2cap_pi(sk)->conn; ··· 407 362 l2cap_send_cmd(conn, conn->info_ident, 408 
363 L2CAP_INFO_REQ, sizeof(req), &req); 409 364 } 365 + } 366 + 367 + static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk) 368 + { 369 + struct l2cap_disconn_req req; 370 + 371 + req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); 372 + req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 373 + l2cap_send_cmd(conn, l2cap_get_ident(conn), 374 + L2CAP_DISCONN_REQ, sizeof(req), &req); 410 375 } 411 376 412 377 /* ---- L2CAP connections ---- */ ··· 703 648 case BT_CONFIG: 704 649 if (sk->sk_type == SOCK_SEQPACKET) { 705 650 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 706 - struct l2cap_disconn_req req; 707 651 708 652 sk->sk_state = BT_DISCONN; 709 653 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 710 - 711 - req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); 712 - req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 713 - l2cap_send_cmd(conn, l2cap_get_ident(conn), 714 - L2CAP_DISCONN_REQ, sizeof(req), &req); 654 + l2cap_send_disconn_req(conn, sk); 715 655 } else 716 656 l2cap_chan_del(sk, reason); 717 657 break; ··· 765 715 766 716 pi->imtu = l2cap_pi(parent)->imtu; 767 717 pi->omtu = l2cap_pi(parent)->omtu; 718 + pi->mode = l2cap_pi(parent)->mode; 719 + pi->fcs = l2cap_pi(parent)->fcs; 768 720 pi->sec_level = l2cap_pi(parent)->sec_level; 769 721 pi->role_switch = l2cap_pi(parent)->role_switch; 770 722 pi->force_reliable = l2cap_pi(parent)->force_reliable; 771 723 } else { 772 724 pi->imtu = L2CAP_DEFAULT_MTU; 773 725 pi->omtu = 0; 726 + pi->mode = L2CAP_MODE_BASIC; 727 + pi->fcs = L2CAP_FCS_CRC16; 774 728 pi->sec_level = BT_SECURITY_LOW; 775 729 pi->role_switch = 0; 776 730 pi->force_reliable = 0; ··· 1010 956 goto done; 1011 957 } 1012 958 959 + switch (l2cap_pi(sk)->mode) { 960 + case L2CAP_MODE_BASIC: 961 + break; 962 + case L2CAP_MODE_ERTM: 963 + case L2CAP_MODE_STREAMING: 964 + if (enable_ertm) 965 + break; 966 + /* fall through */ 967 + default: 968 + err = -ENOTSUPP; 969 + goto done; 970 + } 971 + 1013 972 switch (sk->sk_state) { 1014 973 case BT_CONNECT: 1015 974 case 
BT_CONNECT2: ··· 1071 1004 1072 1005 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) { 1073 1006 err = -EBADFD; 1007 + goto done; 1008 + } 1009 + 1010 + switch (l2cap_pi(sk)->mode) { 1011 + case L2CAP_MODE_BASIC: 1012 + break; 1013 + case L2CAP_MODE_ERTM: 1014 + case L2CAP_MODE_STREAMING: 1015 + if (enable_ertm) 1016 + break; 1017 + /* fall through */ 1018 + default: 1019 + err = -ENOTSUPP; 1074 1020 goto done; 1075 1021 } 1076 1022 ··· 1197 1117 return 0; 1198 1118 } 1199 1119 1200 - static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len) 1120 + static void l2cap_monitor_timeout(unsigned long arg) 1121 + { 1122 + struct sock *sk = (void *) arg; 1123 + u16 control; 1124 + 1125 + bh_lock_sock(sk); 1126 + if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { 1127 + l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); 1128 + return; 1129 + } 1130 + 1131 + l2cap_pi(sk)->retry_count++; 1132 + __mod_monitor_timer(); 1133 + 1134 + control = L2CAP_CTRL_POLL; 1135 + l2cap_send_rr_or_rnr(l2cap_pi(sk), control); 1136 + bh_unlock_sock(sk); 1137 + } 1138 + 1139 + static void l2cap_retrans_timeout(unsigned long arg) 1140 + { 1141 + struct sock *sk = (void *) arg; 1142 + u16 control; 1143 + 1144 + bh_lock_sock(sk); 1145 + l2cap_pi(sk)->retry_count = 1; 1146 + __mod_monitor_timer(); 1147 + 1148 + l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; 1149 + 1150 + control = L2CAP_CTRL_POLL; 1151 + l2cap_send_rr_or_rnr(l2cap_pi(sk), control); 1152 + bh_unlock_sock(sk); 1153 + } 1154 + 1155 + static void l2cap_drop_acked_frames(struct sock *sk) 1156 + { 1157 + struct sk_buff *skb; 1158 + 1159 + while ((skb = skb_peek(TX_QUEUE(sk)))) { 1160 + if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq) 1161 + break; 1162 + 1163 + skb = skb_dequeue(TX_QUEUE(sk)); 1164 + kfree_skb(skb); 1165 + 1166 + l2cap_pi(sk)->unacked_frames--; 1167 + } 1168 + 1169 + if (!l2cap_pi(sk)->unacked_frames) 1170 + del_timer(&l2cap_pi(sk)->retrans_timer); 1171 + 1172 + 
return; 1173 + } 1174 + 1175 + static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb) 1176 + { 1177 + struct l2cap_pinfo *pi = l2cap_pi(sk); 1178 + int err; 1179 + 1180 + BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); 1181 + 1182 + err = hci_send_acl(pi->conn->hcon, skb, 0); 1183 + if (err < 0) 1184 + kfree_skb(skb); 1185 + 1186 + return err; 1187 + } 1188 + 1189 + static int l2cap_streaming_send(struct sock *sk) 1190 + { 1191 + struct sk_buff *skb, *tx_skb; 1192 + struct l2cap_pinfo *pi = l2cap_pi(sk); 1193 + u16 control, fcs; 1194 + int err; 1195 + 1196 + while ((skb = sk->sk_send_head)) { 1197 + tx_skb = skb_clone(skb, GFP_ATOMIC); 1198 + 1199 + control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1200 + control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; 1201 + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1202 + 1203 + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { 1204 + fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); 1205 + put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); 1206 + } 1207 + 1208 + err = l2cap_do_send(sk, tx_skb); 1209 + if (err < 0) { 1210 + l2cap_send_disconn_req(pi->conn, sk); 1211 + return err; 1212 + } 1213 + 1214 + pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1215 + 1216 + if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1217 + sk->sk_send_head = NULL; 1218 + else 1219 + sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); 1220 + 1221 + skb = skb_dequeue(TX_QUEUE(sk)); 1222 + kfree_skb(skb); 1223 + } 1224 + return 0; 1225 + } 1226 + 1227 + static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq) 1228 + { 1229 + struct l2cap_pinfo *pi = l2cap_pi(sk); 1230 + struct sk_buff *skb, *tx_skb; 1231 + u16 control, fcs; 1232 + int err; 1233 + 1234 + skb = skb_peek(TX_QUEUE(sk)); 1235 + do { 1236 + if (bt_cb(skb)->tx_seq != tx_seq) { 1237 + if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1238 + break; 1239 + skb = skb_queue_next(TX_QUEUE(sk), skb); 1240 + continue; 1241 + } 1242 + 1243 + if 
(pi->remote_max_tx && 1244 + bt_cb(skb)->retries == pi->remote_max_tx) { 1245 + l2cap_send_disconn_req(pi->conn, sk); 1246 + break; 1247 + } 1248 + 1249 + tx_skb = skb_clone(skb, GFP_ATOMIC); 1250 + bt_cb(skb)->retries++; 1251 + control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1252 + control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1253 + | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1254 + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1255 + 1256 + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { 1257 + fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); 1258 + put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); 1259 + } 1260 + 1261 + err = l2cap_do_send(sk, tx_skb); 1262 + if (err < 0) { 1263 + l2cap_send_disconn_req(pi->conn, sk); 1264 + return err; 1265 + } 1266 + break; 1267 + } while(1); 1268 + return 0; 1269 + } 1270 + 1271 + static int l2cap_ertm_send(struct sock *sk) 1272 + { 1273 + struct sk_buff *skb, *tx_skb; 1274 + struct l2cap_pinfo *pi = l2cap_pi(sk); 1275 + u16 control, fcs; 1276 + int err; 1277 + 1278 + if (pi->conn_state & L2CAP_CONN_WAIT_F) 1279 + return 0; 1280 + 1281 + while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) 1282 + && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) { 1283 + tx_skb = skb_clone(skb, GFP_ATOMIC); 1284 + 1285 + if (pi->remote_max_tx && 1286 + bt_cb(skb)->retries == pi->remote_max_tx) { 1287 + l2cap_send_disconn_req(pi->conn, sk); 1288 + break; 1289 + } 1290 + 1291 + bt_cb(skb)->retries++; 1292 + 1293 + control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1294 + control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1295 + | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1296 + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1297 + 1298 + 1299 + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { 1300 + fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); 1301 + put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); 1302 + } 1303 + 1304 + err = l2cap_do_send(sk, tx_skb); 1305 + if (err < 
0) { 1306 + l2cap_send_disconn_req(pi->conn, sk); 1307 + return err; 1308 + } 1309 + __mod_retrans_timer(); 1310 + 1311 + bt_cb(skb)->tx_seq = pi->next_tx_seq; 1312 + pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1313 + 1314 + pi->unacked_frames++; 1315 + 1316 + if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1317 + sk->sk_send_head = NULL; 1318 + else 1319 + sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); 1320 + } 1321 + 1322 + return 0; 1323 + } 1324 + 1325 + static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) 1201 1326 { 1202 1327 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1203 - struct sk_buff *skb, **frag; 1204 - int err, hlen, count, sent = 0; 1205 - struct l2cap_hdr *lh; 1206 - 1207 - BT_DBG("sk %p len %d", sk, len); 1208 - 1209 - /* First fragment (with L2CAP header) */ 1210 - if (sk->sk_type == SOCK_DGRAM) 1211 - hlen = L2CAP_HDR_SIZE + 2; 1212 - else 1213 - hlen = L2CAP_HDR_SIZE; 1214 - 1215 - count = min_t(unsigned int, (conn->mtu - hlen), len); 1216 - 1217 - skb = bt_skb_send_alloc(sk, hlen + count, 1218 - msg->msg_flags & MSG_DONTWAIT, &err); 1219 - if (!skb) 1220 - return err; 1221 - 1222 - /* Create L2CAP header */ 1223 - lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1224 - lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1225 - lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1226 - 1227 - if (sk->sk_type == SOCK_DGRAM) 1228 - put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2)); 1328 + struct sk_buff **frag; 1329 + int err, sent = 0; 1229 1330 1230 1331 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { 1231 - err = -EFAULT; 1232 - goto fail; 1332 + return -EFAULT; 1233 1333 } 1234 1334 1235 1335 sent += count; ··· 1422 1162 1423 1163 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); 1424 1164 if (!*frag) 1425 - goto fail; 1426 - 1427 - if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) { 1428 - err = 
-EFAULT; 1429 - goto fail; 1430 - } 1165 + return -EFAULT; 1166 + if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 1167 + return -EFAULT; 1431 1168 1432 1169 sent += count; 1433 1170 len -= count; 1434 1171 1435 1172 frag = &(*frag)->next; 1436 1173 } 1437 - err = hci_send_acl(conn->hcon, skb, 0); 1438 - if (err < 0) 1439 - goto fail; 1440 1174 1441 1175 return sent; 1176 + } 1442 1177 1443 - fail: 1444 - kfree_skb(skb); 1445 - return err; 1178 + static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1179 + { 1180 + struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1181 + struct sk_buff *skb; 1182 + int err, count, hlen = L2CAP_HDR_SIZE + 2; 1183 + struct l2cap_hdr *lh; 1184 + 1185 + BT_DBG("sk %p len %d", sk, (int)len); 1186 + 1187 + count = min_t(unsigned int, (conn->mtu - hlen), len); 1188 + skb = bt_skb_send_alloc(sk, count + hlen, 1189 + msg->msg_flags & MSG_DONTWAIT, &err); 1190 + if (!skb) 1191 + return ERR_PTR(-ENOMEM); 1192 + 1193 + /* Create L2CAP header */ 1194 + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1195 + lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1196 + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1197 + put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2)); 1198 + 1199 + err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1200 + if (unlikely(err < 0)) { 1201 + kfree_skb(skb); 1202 + return ERR_PTR(err); 1203 + } 1204 + return skb; 1205 + } 1206 + 1207 + static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1208 + { 1209 + struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1210 + struct sk_buff *skb; 1211 + int err, count, hlen = L2CAP_HDR_SIZE; 1212 + struct l2cap_hdr *lh; 1213 + 1214 + BT_DBG("sk %p len %d", sk, (int)len); 1215 + 1216 + count = min_t(unsigned int, (conn->mtu - hlen), len); 1217 + skb = bt_skb_send_alloc(sk, count + hlen, 1218 + msg->msg_flags & MSG_DONTWAIT, &err); 1219 + if (!skb) 1220 + return 
ERR_PTR(-ENOMEM); 1221 + 1222 + /* Create L2CAP header */ 1223 + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1224 + lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1225 + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1226 + 1227 + err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1228 + if (unlikely(err < 0)) { 1229 + kfree_skb(skb); 1230 + return ERR_PTR(err); 1231 + } 1232 + return skb; 1233 + } 1234 + 1235 + static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1236 + { 1237 + struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1238 + struct sk_buff *skb; 1239 + int err, count, hlen = L2CAP_HDR_SIZE + 2; 1240 + struct l2cap_hdr *lh; 1241 + 1242 + BT_DBG("sk %p len %d", sk, (int)len); 1243 + 1244 + if (sdulen) 1245 + hlen += 2; 1246 + 1247 + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) 1248 + hlen += 2; 1249 + 1250 + count = min_t(unsigned int, (conn->mtu - hlen), len); 1251 + skb = bt_skb_send_alloc(sk, count + hlen, 1252 + msg->msg_flags & MSG_DONTWAIT, &err); 1253 + if (!skb) 1254 + return ERR_PTR(-ENOMEM); 1255 + 1256 + /* Create L2CAP header */ 1257 + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1258 + lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1259 + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1260 + put_unaligned_le16(control, skb_put(skb, 2)); 1261 + if (sdulen) 1262 + put_unaligned_le16(sdulen, skb_put(skb, 2)); 1263 + 1264 + err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1265 + if (unlikely(err < 0)) { 1266 + kfree_skb(skb); 1267 + return ERR_PTR(err); 1268 + } 1269 + 1270 + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) 1271 + put_unaligned_le16(0, skb_put(skb, 2)); 1272 + 1273 + bt_cb(skb)->retries = 0; 1274 + return skb; 1275 + } 1276 + 1277 + static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) 1278 + { 1279 + struct l2cap_pinfo *pi = l2cap_pi(sk); 1280 + struct sk_buff *skb; 1281 + struct sk_buff_head 
sar_queue; 1282 + u16 control; 1283 + size_t size = 0; 1284 + 1285 + __skb_queue_head_init(&sar_queue); 1286 + control = L2CAP_SDU_START; 1287 + skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len); 1288 + if (IS_ERR(skb)) 1289 + return PTR_ERR(skb); 1290 + 1291 + __skb_queue_tail(&sar_queue, skb); 1292 + len -= pi->max_pdu_size; 1293 + size +=pi->max_pdu_size; 1294 + control = 0; 1295 + 1296 + while (len > 0) { 1297 + size_t buflen; 1298 + 1299 + if (len > pi->max_pdu_size) { 1300 + control |= L2CAP_SDU_CONTINUE; 1301 + buflen = pi->max_pdu_size; 1302 + } else { 1303 + control |= L2CAP_SDU_END; 1304 + buflen = len; 1305 + } 1306 + 1307 + skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0); 1308 + if (IS_ERR(skb)) { 1309 + skb_queue_purge(&sar_queue); 1310 + return PTR_ERR(skb); 1311 + } 1312 + 1313 + __skb_queue_tail(&sar_queue, skb); 1314 + len -= buflen; 1315 + size += buflen; 1316 + control = 0; 1317 + } 1318 + skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); 1319 + if (sk->sk_send_head == NULL) 1320 + sk->sk_send_head = sar_queue.next; 1321 + 1322 + return size; 1446 1323 } 1447 1324 1448 1325 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) 1449 1326 { 1450 1327 struct sock *sk = sock->sk; 1451 - int err = 0; 1328 + struct l2cap_pinfo *pi = l2cap_pi(sk); 1329 + struct sk_buff *skb; 1330 + u16 control; 1331 + int err; 1452 1332 1453 1333 BT_DBG("sock %p, sk %p", sock, sk); 1454 1334 ··· 1600 1200 return -EOPNOTSUPP; 1601 1201 1602 1202 /* Check outgoing MTU */ 1603 - if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu) 1203 + if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC 1204 + && len > pi->omtu) 1604 1205 return -EINVAL; 1605 1206 1606 1207 lock_sock(sk); 1607 1208 1608 - if (sk->sk_state == BT_CONNECTED) 1609 - err = l2cap_do_send(sk, msg, len); 1610 - else 1209 + if (sk->sk_state != BT_CONNECTED) { 1611 1210 err = -ENOTCONN; 1211 + goto done; 1212 + } 1612 
1213 1214 + /* Connectionless channel */ 1215 + if (sk->sk_type == SOCK_DGRAM) { 1216 + skb = l2cap_create_connless_pdu(sk, msg, len); 1217 + err = l2cap_do_send(sk, skb); 1218 + goto done; 1219 + } 1220 + 1221 + switch (pi->mode) { 1222 + case L2CAP_MODE_BASIC: 1223 + /* Create a basic PDU */ 1224 + skb = l2cap_create_basic_pdu(sk, msg, len); 1225 + if (IS_ERR(skb)) { 1226 + err = PTR_ERR(skb); 1227 + goto done; 1228 + } 1229 + 1230 + err = l2cap_do_send(sk, skb); 1231 + if (!err) 1232 + err = len; 1233 + break; 1234 + 1235 + case L2CAP_MODE_ERTM: 1236 + case L2CAP_MODE_STREAMING: 1237 + /* Entire SDU fits into one PDU */ 1238 + if (len <= pi->max_pdu_size) { 1239 + control = L2CAP_SDU_UNSEGMENTED; 1240 + skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0); 1241 + if (IS_ERR(skb)) { 1242 + err = PTR_ERR(skb); 1243 + goto done; 1244 + } 1245 + __skb_queue_tail(TX_QUEUE(sk), skb); 1246 + if (sk->sk_send_head == NULL) 1247 + sk->sk_send_head = skb; 1248 + } else { 1249 + /* Segment SDU into multiples PDUs */ 1250 + err = l2cap_sar_segment_sdu(sk, msg, len); 1251 + if (err < 0) 1252 + goto done; 1253 + } 1254 + 1255 + if (pi->mode == L2CAP_MODE_STREAMING) 1256 + err = l2cap_streaming_send(sk); 1257 + else 1258 + err = l2cap_ertm_send(sk); 1259 + 1260 + if (!err) 1261 + err = len; 1262 + break; 1263 + 1264 + default: 1265 + BT_DBG("bad state %1.1x", pi->mode); 1266 + err = -EINVAL; 1267 + } 1268 + 1269 + done: 1613 1270 release_sock(sk); 1614 1271 return err; 1615 1272 } ··· 1714 1257 opts.imtu = l2cap_pi(sk)->imtu; 1715 1258 opts.omtu = l2cap_pi(sk)->omtu; 1716 1259 opts.flush_to = l2cap_pi(sk)->flush_to; 1717 - opts.mode = L2CAP_MODE_BASIC; 1260 + opts.mode = l2cap_pi(sk)->mode; 1261 + opts.fcs = l2cap_pi(sk)->fcs; 1718 1262 1719 1263 len = min_t(unsigned int, sizeof(opts), optlen); 1720 1264 if (copy_from_user((char *) &opts, optval, len)) { ··· 1723 1265 break; 1724 1266 } 1725 1267 1726 - l2cap_pi(sk)->imtu = opts.imtu; 1727 - l2cap_pi(sk)->omtu = opts.omtu; 
1268 + l2cap_pi(sk)->imtu = opts.imtu; 1269 + l2cap_pi(sk)->omtu = opts.omtu; 1270 + l2cap_pi(sk)->mode = opts.mode; 1271 + l2cap_pi(sk)->fcs = opts.fcs; 1728 1272 break; 1729 1273 1730 1274 case L2CAP_LM: ··· 1839 1379 opts.imtu = l2cap_pi(sk)->imtu; 1840 1380 opts.omtu = l2cap_pi(sk)->omtu; 1841 1381 opts.flush_to = l2cap_pi(sk)->flush_to; 1842 - opts.mode = L2CAP_MODE_BASIC; 1382 + opts.mode = l2cap_pi(sk)->mode; 1383 + opts.fcs = l2cap_pi(sk)->fcs; 1843 1384 1844 1385 len = min_t(unsigned int, len, sizeof(opts)); 1845 1386 if (copy_to_user(optval, (char *) &opts, len)) ··· 2169 1708 *ptr += L2CAP_CONF_OPT_SIZE + len; 2170 1709 } 2171 1710 1711 + static int l2cap_mode_supported(__u8 mode, __u32 feat_mask) 1712 + { 1713 + u32 local_feat_mask = l2cap_feat_mask; 1714 + if (enable_ertm) 1715 + local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; 1716 + 1717 + switch (mode) { 1718 + case L2CAP_MODE_ERTM: 1719 + return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; 1720 + case L2CAP_MODE_STREAMING: 1721 + return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; 1722 + default: 1723 + return 0x00; 1724 + } 1725 + } 1726 + 1727 + static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 1728 + { 1729 + switch (mode) { 1730 + case L2CAP_MODE_STREAMING: 1731 + case L2CAP_MODE_ERTM: 1732 + if (l2cap_mode_supported(mode, remote_feat_mask)) 1733 + return mode; 1734 + /* fall through */ 1735 + default: 1736 + return L2CAP_MODE_BASIC; 1737 + } 1738 + } 1739 + 2172 1740 static int l2cap_build_conf_req(struct sock *sk, void *data) 2173 1741 { 2174 1742 struct l2cap_pinfo *pi = l2cap_pi(sk); 2175 1743 struct l2cap_conf_req *req = data; 1744 + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM }; 2176 1745 void *ptr = req->data; 2177 1746 2178 1747 BT_DBG("sk %p", sk); 2179 1748 2180 - if (pi->imtu != L2CAP_DEFAULT_MTU) 2181 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 1749 + if (pi->num_conf_req || pi->num_conf_rsp) 1750 + goto done; 1751 + 
1752 + switch (pi->mode) { 1753 + case L2CAP_MODE_STREAMING: 1754 + case L2CAP_MODE_ERTM: 1755 + pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; 1756 + if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) 1757 + l2cap_send_disconn_req(pi->conn, sk); 1758 + break; 1759 + default: 1760 + pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); 1761 + break; 1762 + } 1763 + 1764 + done: 1765 + switch (pi->mode) { 1766 + case L2CAP_MODE_BASIC: 1767 + if (pi->imtu != L2CAP_DEFAULT_MTU) 1768 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 1769 + break; 1770 + 1771 + case L2CAP_MODE_ERTM: 1772 + rfc.mode = L2CAP_MODE_ERTM; 1773 + rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; 1774 + rfc.max_transmit = L2CAP_DEFAULT_MAX_TX; 1775 + rfc.retrans_timeout = 0; 1776 + rfc.monitor_timeout = 0; 1777 + rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 1778 + 1779 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1780 + sizeof(rfc), (unsigned long) &rfc); 1781 + 1782 + if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 1783 + break; 1784 + 1785 + if (pi->fcs == L2CAP_FCS_NONE || 1786 + pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1787 + pi->fcs = L2CAP_FCS_NONE; 1788 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); 1789 + } 1790 + break; 1791 + 1792 + case L2CAP_MODE_STREAMING: 1793 + rfc.mode = L2CAP_MODE_STREAMING; 1794 + rfc.txwin_size = 0; 1795 + rfc.max_transmit = 0; 1796 + rfc.retrans_timeout = 0; 1797 + rfc.monitor_timeout = 0; 1798 + rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 1799 + 1800 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1801 + sizeof(rfc), (unsigned long) &rfc); 1802 + 1803 + if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 1804 + break; 1805 + 1806 + if (pi->fcs == L2CAP_FCS_NONE || 1807 + pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1808 + pi->fcs = L2CAP_FCS_NONE; 1809 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); 1810 + } 1811 + break; 1812 + } 2182 1813 2183 1814 /* FIXME: Need actual value of the flush timeout */ 2184 1815 //if 
(flush_to != L2CAP_DEFAULT_FLUSH_TO) ··· 2320 1767 memcpy(&rfc, (void *) val, olen); 2321 1768 break; 2322 1769 1770 + case L2CAP_CONF_FCS: 1771 + if (val == L2CAP_FCS_NONE) 1772 + pi->conf_state |= L2CAP_CONF_NO_FCS_RECV; 1773 + 1774 + break; 1775 + 2323 1776 default: 2324 1777 if (hint) 2325 1778 break; ··· 2336 1777 } 2337 1778 } 2338 1779 1780 + if (pi->num_conf_rsp || pi->num_conf_req) 1781 + goto done; 1782 + 1783 + switch (pi->mode) { 1784 + case L2CAP_MODE_STREAMING: 1785 + case L2CAP_MODE_ERTM: 1786 + pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; 1787 + if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) 1788 + return -ECONNREFUSED; 1789 + break; 1790 + default: 1791 + pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); 1792 + break; 1793 + } 1794 + 1795 + done: 1796 + if (pi->mode != rfc.mode) { 1797 + result = L2CAP_CONF_UNACCEPT; 1798 + rfc.mode = pi->mode; 1799 + 1800 + if (pi->num_conf_rsp == 1) 1801 + return -ECONNREFUSED; 1802 + 1803 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1804 + sizeof(rfc), (unsigned long) &rfc); 1805 + } 1806 + 1807 + 2339 1808 if (result == L2CAP_CONF_SUCCESS) { 2340 1809 /* Configure output options and let the other side know 2341 1810 * which ones we don't like. 
*/ 2342 1811 2343 - if (rfc.mode == L2CAP_MODE_BASIC) { 2344 - if (mtu < pi->omtu) 2345 - result = L2CAP_CONF_UNACCEPT; 2346 - else { 2347 - pi->omtu = mtu; 2348 - pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2349 - } 1812 + if (mtu < L2CAP_DEFAULT_MIN_MTU) 1813 + result = L2CAP_CONF_UNACCEPT; 1814 + else { 1815 + pi->omtu = mtu; 1816 + pi->conf_state |= L2CAP_CONF_MTU_DONE; 1817 + } 1818 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 2350 1819 2351 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 2352 - } else { 1820 + switch (rfc.mode) { 1821 + case L2CAP_MODE_BASIC: 1822 + pi->fcs = L2CAP_FCS_NONE; 1823 + pi->conf_state |= L2CAP_CONF_MODE_DONE; 1824 + break; 1825 + 1826 + case L2CAP_MODE_ERTM: 1827 + pi->remote_tx_win = rfc.txwin_size; 1828 + pi->remote_max_tx = rfc.max_transmit; 1829 + pi->max_pdu_size = rfc.max_pdu_size; 1830 + 1831 + rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; 1832 + rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; 1833 + 1834 + pi->conf_state |= L2CAP_CONF_MODE_DONE; 1835 + break; 1836 + 1837 + case L2CAP_MODE_STREAMING: 1838 + pi->remote_tx_win = rfc.txwin_size; 1839 + pi->max_pdu_size = rfc.max_pdu_size; 1840 + 1841 + pi->conf_state |= L2CAP_CONF_MODE_DONE; 1842 + break; 1843 + 1844 + default: 2353 1845 result = L2CAP_CONF_UNACCEPT; 2354 1846 2355 1847 memset(&rfc, 0, sizeof(rfc)); 2356 - rfc.mode = L2CAP_MODE_BASIC; 2357 - 2358 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2359 - sizeof(rfc), (unsigned long) &rfc); 1848 + rfc.mode = pi->mode; 2360 1849 } 2361 - } 2362 1850 1851 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1852 + sizeof(rfc), (unsigned long) &rfc); 1853 + 1854 + if (result == L2CAP_CONF_SUCCESS) 1855 + pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; 1856 + } 2363 1857 rsp->scid = cpu_to_le16(pi->dcid); 2364 1858 rsp->result = cpu_to_le16(result); 2365 1859 rsp->flags = cpu_to_le16(0x0000); 1860 + 1861 + return ptr - data; 1862 + } 1863 + 1864 + static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, 
void *data, u16 *result) 1865 + { 1866 + struct l2cap_pinfo *pi = l2cap_pi(sk); 1867 + struct l2cap_conf_req *req = data; 1868 + void *ptr = req->data; 1869 + int type, olen; 1870 + unsigned long val; 1871 + struct l2cap_conf_rfc rfc; 1872 + 1873 + BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data); 1874 + 1875 + while (len >= L2CAP_CONF_OPT_SIZE) { 1876 + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); 1877 + 1878 + switch (type) { 1879 + case L2CAP_CONF_MTU: 1880 + if (val < L2CAP_DEFAULT_MIN_MTU) { 1881 + *result = L2CAP_CONF_UNACCEPT; 1882 + pi->omtu = L2CAP_DEFAULT_MIN_MTU; 1883 + } else 1884 + pi->omtu = val; 1885 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 1886 + break; 1887 + 1888 + case L2CAP_CONF_FLUSH_TO: 1889 + pi->flush_to = val; 1890 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 1891 + 2, pi->flush_to); 1892 + break; 1893 + 1894 + case L2CAP_CONF_RFC: 1895 + if (olen == sizeof(rfc)) 1896 + memcpy(&rfc, (void *)val, olen); 1897 + 1898 + if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) && 1899 + rfc.mode != pi->mode) 1900 + return -ECONNREFUSED; 1901 + 1902 + pi->mode = rfc.mode; 1903 + pi->fcs = 0; 1904 + 1905 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1906 + sizeof(rfc), (unsigned long) &rfc); 1907 + break; 1908 + } 1909 + } 1910 + 1911 + if (*result == L2CAP_CONF_SUCCESS) { 1912 + switch (rfc.mode) { 1913 + case L2CAP_MODE_ERTM: 1914 + pi->remote_tx_win = rfc.txwin_size; 1915 + pi->retrans_timeout = rfc.retrans_timeout; 1916 + pi->monitor_timeout = rfc.monitor_timeout; 1917 + pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size); 1918 + break; 1919 + case L2CAP_MODE_STREAMING: 1920 + pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size); 1921 + break; 1922 + } 1923 + } 1924 + 1925 + req->dcid = cpu_to_le16(pi->dcid); 1926 + req->flags = cpu_to_le16(0x0000); 2366 1927 2367 1928 return ptr - data; 2368 1929 } ··· 2673 1994 2674 1995 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2675 1996 l2cap_build_conf_req(sk, req), 
req); 1997 + l2cap_pi(sk)->num_conf_req++; 2676 1998 break; 2677 1999 2678 2000 case L2CAP_CR_PEND: ··· 2732 2052 2733 2053 /* Complete config. */ 2734 2054 len = l2cap_parse_conf_req(sk, rsp); 2735 - if (len < 0) 2055 + if (len < 0) { 2056 + l2cap_send_disconn_req(conn, sk); 2736 2057 goto unlock; 2058 + } 2737 2059 2738 2060 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 2061 + l2cap_pi(sk)->num_conf_rsp++; 2739 2062 2740 2063 /* Reset config buffer. */ 2741 2064 l2cap_pi(sk)->conf_len = 0; ··· 2747 2064 goto unlock; 2748 2065 2749 2066 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 2067 + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) 2068 + || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) 2069 + l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; 2070 + 2750 2071 sk->sk_state = BT_CONNECTED; 2072 + l2cap_pi(sk)->next_tx_seq = 0; 2073 + l2cap_pi(sk)->expected_ack_seq = 0; 2074 + l2cap_pi(sk)->unacked_frames = 0; 2075 + 2076 + setup_timer(&l2cap_pi(sk)->retrans_timer, 2077 + l2cap_retrans_timeout, (unsigned long) sk); 2078 + setup_timer(&l2cap_pi(sk)->monitor_timer, 2079 + l2cap_monitor_timeout, (unsigned long) sk); 2080 + 2081 + __skb_queue_head_init(TX_QUEUE(sk)); 2082 + __skb_queue_head_init(SREJ_QUEUE(sk)); 2751 2083 l2cap_chan_ready(sk); 2752 2084 goto unlock; 2753 2085 } ··· 2771 2073 u8 buf[64]; 2772 2074 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2773 2075 l2cap_build_conf_req(sk, buf), buf); 2076 + l2cap_pi(sk)->num_conf_req++; 2774 2077 } 2775 2078 2776 2079 unlock: ··· 2801 2102 break; 2802 2103 2803 2104 case L2CAP_CONF_UNACCEPT: 2804 - if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) { 2805 - char req[128]; 2806 - /* It does not make sense to adjust L2CAP parameters 2807 - * that are currently defined in the spec. We simply 2808 - * resend config request that we sent earlier. It is 2809 - * stupid, but it helps qualification testing which 2810 - * expects at least some response from us. 
*/ 2811 - l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2812 - l2cap_build_conf_req(sk, req), req); 2813 - goto done; 2105 + if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { 2106 + int len = cmd->len - sizeof(*rsp); 2107 + char req[64]; 2108 + 2109 + /* throw out any old stored conf requests */ 2110 + result = L2CAP_CONF_SUCCESS; 2111 + len = l2cap_parse_conf_rsp(sk, rsp->data, 2112 + len, req, &result); 2113 + if (len < 0) { 2114 + l2cap_send_disconn_req(conn, sk); 2115 + goto done; 2116 + } 2117 + 2118 + l2cap_send_cmd(conn, l2cap_get_ident(conn), 2119 + L2CAP_CONF_REQ, len, req); 2120 + l2cap_pi(sk)->num_conf_req++; 2121 + if (result != L2CAP_CONF_SUCCESS) 2122 + goto done; 2123 + break; 2814 2124 } 2815 2125 2816 2126 default: 2817 2127 sk->sk_state = BT_DISCONN; 2818 2128 sk->sk_err = ECONNRESET; 2819 2129 l2cap_sock_set_timer(sk, HZ * 5); 2820 - { 2821 - struct l2cap_disconn_req req; 2822 - req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); 2823 - req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 2824 - l2cap_send_cmd(conn, l2cap_get_ident(conn), 2825 - L2CAP_DISCONN_REQ, sizeof(req), &req); 2826 - } 2130 + l2cap_send_disconn_req(conn, sk); 2827 2131 goto done; 2828 2132 } 2829 2133 ··· 2836 2134 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 2837 2135 2838 2136 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 2137 + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) 2138 + || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) 2139 + l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; 2140 + 2839 2141 sk->sk_state = BT_CONNECTED; 2142 + l2cap_pi(sk)->expected_tx_seq = 0; 2143 + l2cap_pi(sk)->buffer_seq = 0; 2144 + l2cap_pi(sk)->num_to_ack = 0; 2145 + __skb_queue_head_init(TX_QUEUE(sk)); 2146 + __skb_queue_head_init(SREJ_QUEUE(sk)); 2840 2147 l2cap_chan_ready(sk); 2841 2148 } 2842 2149 ··· 2876 2165 2877 2166 sk->sk_shutdown = SHUTDOWN_MASK; 2878 2167 2168 + skb_queue_purge(TX_QUEUE(sk)); 2169 + skb_queue_purge(SREJ_QUEUE(sk)); 2170 + 
del_timer(&l2cap_pi(sk)->retrans_timer); 2171 + del_timer(&l2cap_pi(sk)->monitor_timer); 2172 + 2879 2173 l2cap_chan_del(sk, ECONNRESET); 2880 2174 bh_unlock_sock(sk); 2881 2175 ··· 2903 2187 if (!sk) 2904 2188 return 0; 2905 2189 2190 + skb_queue_purge(TX_QUEUE(sk)); 2191 + skb_queue_purge(SREJ_QUEUE(sk)); 2192 + del_timer(&l2cap_pi(sk)->retrans_timer); 2193 + del_timer(&l2cap_pi(sk)->monitor_timer); 2194 + 2906 2195 l2cap_chan_del(sk, 0); 2907 2196 bh_unlock_sock(sk); 2908 2197 ··· 2926 2205 2927 2206 if (type == L2CAP_IT_FEAT_MASK) { 2928 2207 u8 buf[8]; 2208 + u32 feat_mask = l2cap_feat_mask; 2929 2209 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 2930 2210 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 2931 2211 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 2932 - put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data); 2212 + if (enable_ertm) 2213 + feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 2214 + | L2CAP_FEAT_FCS; 2215 + put_unaligned_le32(feat_mask, rsp->data); 2933 2216 l2cap_send_cmd(conn, cmd->ident, 2934 2217 L2CAP_INFO_RSP, sizeof(buf), buf); 2935 2218 } else if (type == L2CAP_IT_FIXED_CHAN) { ··· 3084 2359 kfree_skb(skb); 3085 2360 } 3086 2361 2362 + static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb) 2363 + { 2364 + u16 our_fcs, rcv_fcs; 2365 + int hdr_size = L2CAP_HDR_SIZE + 2; 2366 + 2367 + if (pi->fcs == L2CAP_FCS_CRC16) { 2368 + skb_trim(skb, skb->len - 2); 2369 + rcv_fcs = get_unaligned_le16(skb->data + skb->len); 2370 + our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 2371 + 2372 + if (our_fcs != rcv_fcs) 2373 + return -EINVAL; 2374 + } 2375 + return 0; 2376 + } 2377 + 2378 + static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar) 2379 + { 2380 + struct sk_buff *next_skb; 2381 + 2382 + bt_cb(skb)->tx_seq = tx_seq; 2383 + bt_cb(skb)->sar = sar; 2384 + 2385 + next_skb = skb_peek(SREJ_QUEUE(sk)); 2386 + if (!next_skb) { 2387 + 
__skb_queue_tail(SREJ_QUEUE(sk), skb); 2388 + return; 2389 + } 2390 + 2391 + do { 2392 + if (bt_cb(next_skb)->tx_seq > tx_seq) { 2393 + __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb); 2394 + return; 2395 + } 2396 + 2397 + if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb)) 2398 + break; 2399 + 2400 + } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb))); 2401 + 2402 + __skb_queue_tail(SREJ_QUEUE(sk), skb); 2403 + } 2404 + 2405 + static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control) 2406 + { 2407 + struct l2cap_pinfo *pi = l2cap_pi(sk); 2408 + struct sk_buff *_skb; 2409 + int err = -EINVAL; 2410 + 2411 + switch (control & L2CAP_CTRL_SAR) { 2412 + case L2CAP_SDU_UNSEGMENTED: 2413 + if (pi->conn_state & L2CAP_CONN_SAR_SDU) { 2414 + kfree_skb(pi->sdu); 2415 + break; 2416 + } 2417 + 2418 + err = sock_queue_rcv_skb(sk, skb); 2419 + if (!err) 2420 + return 0; 2421 + 2422 + break; 2423 + 2424 + case L2CAP_SDU_START: 2425 + if (pi->conn_state & L2CAP_CONN_SAR_SDU) { 2426 + kfree_skb(pi->sdu); 2427 + break; 2428 + } 2429 + 2430 + pi->sdu_len = get_unaligned_le16(skb->data); 2431 + skb_pull(skb, 2); 2432 + 2433 + pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC); 2434 + if (!pi->sdu) { 2435 + err = -ENOMEM; 2436 + break; 2437 + } 2438 + 2439 + memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 2440 + 2441 + pi->conn_state |= L2CAP_CONN_SAR_SDU; 2442 + pi->partial_sdu_len = skb->len; 2443 + err = 0; 2444 + break; 2445 + 2446 + case L2CAP_SDU_CONTINUE: 2447 + if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) 2448 + break; 2449 + 2450 + memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 2451 + 2452 + pi->partial_sdu_len += skb->len; 2453 + if (pi->partial_sdu_len > pi->sdu_len) 2454 + kfree_skb(pi->sdu); 2455 + else 2456 + err = 0; 2457 + 2458 + break; 2459 + 2460 + case L2CAP_SDU_END: 2461 + if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) 2462 + break; 2463 + 2464 + memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 2465 + 2466 + 
pi->conn_state &= ~L2CAP_CONN_SAR_SDU; 2467 + pi->partial_sdu_len += skb->len; 2468 + 2469 + if (pi->partial_sdu_len == pi->sdu_len) { 2470 + _skb = skb_clone(pi->sdu, GFP_ATOMIC); 2471 + err = sock_queue_rcv_skb(sk, _skb); 2472 + if (err < 0) 2473 + kfree_skb(_skb); 2474 + } 2475 + kfree_skb(pi->sdu); 2476 + err = 0; 2477 + 2478 + break; 2479 + } 2480 + 2481 + kfree_skb(skb); 2482 + return err; 2483 + } 2484 + 2485 + static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq) 2486 + { 2487 + struct sk_buff *skb; 2488 + u16 control = 0; 2489 + 2490 + while((skb = skb_peek(SREJ_QUEUE(sk)))) { 2491 + if (bt_cb(skb)->tx_seq != tx_seq) 2492 + break; 2493 + 2494 + skb = skb_dequeue(SREJ_QUEUE(sk)); 2495 + control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 2496 + l2cap_sar_reassembly_sdu(sk, skb, control); 2497 + l2cap_pi(sk)->buffer_seq_srej = 2498 + (l2cap_pi(sk)->buffer_seq_srej + 1) % 64; 2499 + tx_seq++; 2500 + } 2501 + } 2502 + 2503 + static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq) 2504 + { 2505 + struct l2cap_pinfo *pi = l2cap_pi(sk); 2506 + struct srej_list *l, *tmp; 2507 + u16 control; 2508 + 2509 + list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) { 2510 + if (l->tx_seq == tx_seq) { 2511 + list_del(&l->list); 2512 + kfree(l); 2513 + return; 2514 + } 2515 + control = L2CAP_SUPER_SELECT_REJECT; 2516 + control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 2517 + l2cap_send_sframe(pi, control); 2518 + list_del(&l->list); 2519 + list_add_tail(&l->list, SREJ_LIST(sk)); 2520 + } 2521 + } 2522 + 2523 + static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq) 2524 + { 2525 + struct l2cap_pinfo *pi = l2cap_pi(sk); 2526 + struct srej_list *new; 2527 + u16 control; 2528 + 2529 + while (tx_seq != pi->expected_tx_seq) { 2530 + control = L2CAP_SUPER_SELECT_REJECT; 2531 + control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 2532 + if (pi->conn_state & L2CAP_CONN_SEND_PBIT) { 2533 + control |= L2CAP_CTRL_POLL; 2534 + pi->conn_state &= 
~L2CAP_CONN_SEND_PBIT; 2535 + } 2536 + l2cap_send_sframe(pi, control); 2537 + 2538 + new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 2539 + new->tx_seq = pi->expected_tx_seq++; 2540 + list_add_tail(&new->list, SREJ_LIST(sk)); 2541 + } 2542 + pi->expected_tx_seq++; 2543 + } 2544 + 2545 + static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 2546 + { 2547 + struct l2cap_pinfo *pi = l2cap_pi(sk); 2548 + u8 tx_seq = __get_txseq(rx_control); 2549 + u16 tx_control = 0; 2550 + u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; 2551 + int err = 0; 2552 + 2553 + BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); 2554 + 2555 + if (tx_seq == pi->expected_tx_seq) 2556 + goto expected; 2557 + 2558 + if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 2559 + struct srej_list *first; 2560 + 2561 + first = list_first_entry(SREJ_LIST(sk), 2562 + struct srej_list, list); 2563 + if (tx_seq == first->tx_seq) { 2564 + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 2565 + l2cap_check_srej_gap(sk, tx_seq); 2566 + 2567 + list_del(&first->list); 2568 + kfree(first); 2569 + 2570 + if (list_empty(SREJ_LIST(sk))) { 2571 + pi->buffer_seq = pi->buffer_seq_srej; 2572 + pi->conn_state &= ~L2CAP_CONN_SREJ_SENT; 2573 + } 2574 + } else { 2575 + struct srej_list *l; 2576 + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 2577 + 2578 + list_for_each_entry(l, SREJ_LIST(sk), list) { 2579 + if (l->tx_seq == tx_seq) { 2580 + l2cap_resend_srejframe(sk, tx_seq); 2581 + return 0; 2582 + } 2583 + } 2584 + l2cap_send_srejframe(sk, tx_seq); 2585 + } 2586 + } else { 2587 + pi->conn_state |= L2CAP_CONN_SREJ_SENT; 2588 + 2589 + INIT_LIST_HEAD(SREJ_LIST(sk)); 2590 + pi->buffer_seq_srej = pi->buffer_seq; 2591 + 2592 + __skb_queue_head_init(SREJ_QUEUE(sk)); 2593 + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 2594 + 2595 + pi->conn_state |= L2CAP_CONN_SEND_PBIT; 2596 + 2597 + l2cap_send_srejframe(sk, tx_seq); 2598 + } 2599 + return 0; 2600 + 2601 + expected: 2602 + 
pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 2603 + 2604 + if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 2605 + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 2606 + return 0; 2607 + } 2608 + 2609 + pi->buffer_seq = (pi->buffer_seq + 1) % 64; 2610 + 2611 + err = l2cap_sar_reassembly_sdu(sk, skb, rx_control); 2612 + if (err < 0) 2613 + return err; 2614 + 2615 + pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK; 2616 + if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) { 2617 + tx_control |= L2CAP_SUPER_RCV_READY; 2618 + tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 2619 + l2cap_send_sframe(pi, tx_control); 2620 + } 2621 + return 0; 2622 + } 2623 + 2624 + static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 2625 + { 2626 + struct l2cap_pinfo *pi = l2cap_pi(sk); 2627 + u8 tx_seq = __get_reqseq(rx_control); 2628 + 2629 + BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); 2630 + 2631 + switch (rx_control & L2CAP_CTRL_SUPERVISE) { 2632 + case L2CAP_SUPER_RCV_READY: 2633 + if (rx_control & L2CAP_CTRL_POLL) { 2634 + u16 control = L2CAP_CTRL_FINAL; 2635 + control |= L2CAP_SUPER_RCV_READY | 2636 + (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT); 2637 + l2cap_send_sframe(l2cap_pi(sk), control); 2638 + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 2639 + 2640 + } else if (rx_control & L2CAP_CTRL_FINAL) { 2641 + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 2642 + pi->expected_ack_seq = tx_seq; 2643 + l2cap_drop_acked_frames(sk); 2644 + 2645 + if (!(pi->conn_state & L2CAP_CONN_WAIT_F)) 2646 + break; 2647 + 2648 + pi->conn_state &= ~L2CAP_CONN_WAIT_F; 2649 + del_timer(&pi->monitor_timer); 2650 + 2651 + if (pi->unacked_frames > 0) 2652 + __mod_retrans_timer(); 2653 + } else { 2654 + pi->expected_ack_seq = tx_seq; 2655 + l2cap_drop_acked_frames(sk); 2656 + 2657 + if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) 2658 + && (pi->unacked_frames > 0)) 2659 + __mod_retrans_timer(); 2660 + 2661 + 
l2cap_ertm_send(sk); 2662 + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 2663 + } 2664 + break; 2665 + 2666 + case L2CAP_SUPER_REJECT: 2667 + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 2668 + 2669 + pi->expected_ack_seq = __get_reqseq(rx_control); 2670 + l2cap_drop_acked_frames(sk); 2671 + 2672 + sk->sk_send_head = TX_QUEUE(sk)->next; 2673 + pi->next_tx_seq = pi->expected_ack_seq; 2674 + 2675 + l2cap_ertm_send(sk); 2676 + 2677 + break; 2678 + 2679 + case L2CAP_SUPER_SELECT_REJECT: 2680 + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 2681 + 2682 + if (rx_control & L2CAP_CTRL_POLL) { 2683 + l2cap_retransmit_frame(sk, tx_seq); 2684 + pi->expected_ack_seq = tx_seq; 2685 + l2cap_drop_acked_frames(sk); 2686 + l2cap_ertm_send(sk); 2687 + if (pi->conn_state & L2CAP_CONN_WAIT_F) { 2688 + pi->srej_save_reqseq = tx_seq; 2689 + pi->conn_state |= L2CAP_CONN_SREJ_ACT; 2690 + } 2691 + } else if (rx_control & L2CAP_CTRL_FINAL) { 2692 + if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) && 2693 + pi->srej_save_reqseq == tx_seq) 2694 + pi->srej_save_reqseq &= ~L2CAP_CONN_SREJ_ACT; 2695 + else 2696 + l2cap_retransmit_frame(sk, tx_seq); 2697 + } 2698 + else { 2699 + l2cap_retransmit_frame(sk, tx_seq); 2700 + if (pi->conn_state & L2CAP_CONN_WAIT_F) { 2701 + pi->srej_save_reqseq = tx_seq; 2702 + pi->conn_state |= L2CAP_CONN_SREJ_ACT; 2703 + } 2704 + } 2705 + break; 2706 + 2707 + case L2CAP_SUPER_RCV_NOT_READY: 2708 + pi->conn_state |= L2CAP_CONN_REMOTE_BUSY; 2709 + pi->expected_ack_seq = tx_seq; 2710 + l2cap_drop_acked_frames(sk); 2711 + 2712 + del_timer(&l2cap_pi(sk)->retrans_timer); 2713 + if (rx_control & L2CAP_CTRL_POLL) { 2714 + u16 control = L2CAP_CTRL_FINAL; 2715 + l2cap_send_rr_or_rnr(l2cap_pi(sk), control); 2716 + } 2717 + break; 2718 + } 2719 + 2720 + return 0; 2721 + } 2722 + 3087 2723 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 3088 2724 { 3089 2725 struct sock *sk; 2726 + struct l2cap_pinfo *pi; 2727 + u16 control, len; 2728 + u8 tx_seq; 
2729 + int err; 3090 2730 3091 2731 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 3092 2732 if (!sk) { ··· 3459 2369 goto drop; 3460 2370 } 3461 2371 2372 + pi = l2cap_pi(sk); 2373 + 3462 2374 BT_DBG("sk %p, len %d", sk, skb->len); 3463 2375 3464 2376 if (sk->sk_state != BT_CONNECTED) 3465 2377 goto drop; 3466 2378 3467 - if (l2cap_pi(sk)->imtu < skb->len) 3468 - goto drop; 2379 + switch (pi->mode) { 2380 + case L2CAP_MODE_BASIC: 2381 + /* If socket recv buffers overflows we drop data here 2382 + * which is *bad* because L2CAP has to be reliable. 2383 + * But we don't have any other choice. L2CAP doesn't 2384 + * provide flow control mechanism. */ 3469 2385 3470 - /* If socket recv buffers overflows we drop data here 3471 - * which is *bad* because L2CAP has to be reliable. 3472 - * But we don't have any other choice. L2CAP doesn't 3473 - * provide flow control mechanism. */ 2386 + if (pi->imtu < skb->len) 2387 + goto drop; 3474 2388 3475 - if (!sock_queue_rcv_skb(sk, skb)) 2389 + if (!sock_queue_rcv_skb(sk, skb)) 2390 + goto done; 2391 + break; 2392 + 2393 + case L2CAP_MODE_ERTM: 2394 + control = get_unaligned_le16(skb->data); 2395 + skb_pull(skb, 2); 2396 + len = skb->len; 2397 + 2398 + if (__is_sar_start(control)) 2399 + len -= 2; 2400 + 2401 + if (pi->fcs == L2CAP_FCS_CRC16) 2402 + len -= 2; 2403 + 2404 + /* 2405 + * We can just drop the corrupted I-frame here. 2406 + * Receiver will miss it and start proper recovery 2407 + * procedures and ask retransmission. 
2408 + */ 2409 + if (len > L2CAP_DEFAULT_MAX_PDU_SIZE) 2410 + goto drop; 2411 + 2412 + if (l2cap_check_fcs(pi, skb)) 2413 + goto drop; 2414 + 2415 + if (__is_iframe(control)) 2416 + err = l2cap_data_channel_iframe(sk, control, skb); 2417 + else 2418 + err = l2cap_data_channel_sframe(sk, control, skb); 2419 + 2420 + if (!err) 2421 + goto done; 2422 + break; 2423 + 2424 + case L2CAP_MODE_STREAMING: 2425 + control = get_unaligned_le16(skb->data); 2426 + skb_pull(skb, 2); 2427 + len = skb->len; 2428 + 2429 + if (__is_sar_start(control)) 2430 + len -= 2; 2431 + 2432 + if (pi->fcs == L2CAP_FCS_CRC16) 2433 + len -= 2; 2434 + 2435 + if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control)) 2436 + goto drop; 2437 + 2438 + if (l2cap_check_fcs(pi, skb)) 2439 + goto drop; 2440 + 2441 + tx_seq = __get_txseq(control); 2442 + 2443 + if (pi->expected_tx_seq == tx_seq) 2444 + pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 2445 + else 2446 + pi->expected_tx_seq = tx_seq + 1; 2447 + 2448 + err = l2cap_sar_reassembly_sdu(sk, skb, control); 2449 + 3476 2450 goto done; 2451 + 2452 + default: 2453 + BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode); 2454 + break; 2455 + } 3477 2456 3478 2457 drop: 3479 2458 kfree_skb(skb); ··· 3592 2433 cid = __le16_to_cpu(lh->cid); 3593 2434 len = __le16_to_cpu(lh->len); 3594 2435 2436 + if (len != skb->len) { 2437 + kfree_skb(skb); 2438 + return; 2439 + } 2440 + 3595 2441 BT_DBG("len %d, cid 0x%4.4x", len, cid); 3596 2442 3597 2443 switch (cid) { ··· 3605 2441 break; 3606 2442 3607 2443 case L2CAP_CID_CONN_LESS: 3608 - psm = get_unaligned((__le16 *) skb->data); 2444 + psm = get_unaligned_le16(skb->data); 3609 2445 skb_pull(skb, 2); 3610 2446 l2cap_conless_channel(conn, psm, skb); 3611 2447 break; ··· 3991 2827 3992 2828 module_init(l2cap_init); 3993 2829 module_exit(l2cap_exit); 2830 + 2831 + module_param(enable_ertm, bool, 0644); 2832 + MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode"); 3994 2833 3995 2834 
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 3996 2835 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
+56 -13
net/bluetooth/rfcomm/core.c
··· 244 244 auth_type); 245 245 } 246 246 247 + static void rfcomm_session_timeout(unsigned long arg) 248 + { 249 + struct rfcomm_session *s = (void *) arg; 250 + 251 + BT_DBG("session %p state %ld", s, s->state); 252 + 253 + set_bit(RFCOMM_TIMED_OUT, &s->flags); 254 + rfcomm_session_put(s); 255 + rfcomm_schedule(RFCOMM_SCHED_TIMEO); 256 + } 257 + 258 + static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) 259 + { 260 + BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); 261 + 262 + if (!mod_timer(&s->timer, jiffies + timeout)) 263 + rfcomm_session_hold(s); 264 + } 265 + 266 + static void rfcomm_session_clear_timer(struct rfcomm_session *s) 267 + { 268 + BT_DBG("session %p state %ld", s, s->state); 269 + 270 + if (timer_pending(&s->timer) && del_timer(&s->timer)) 271 + rfcomm_session_put(s); 272 + } 273 + 247 274 /* ---- RFCOMM DLCs ---- */ 248 275 static void rfcomm_dlc_timeout(unsigned long arg) 249 276 { ··· 347 320 348 321 rfcomm_session_hold(s); 349 322 323 + rfcomm_session_clear_timer(s); 350 324 rfcomm_dlc_hold(d); 351 325 list_add(&d->list, &s->dlcs); 352 326 d->session = s; ··· 362 334 list_del(&d->list); 363 335 d->session = NULL; 364 336 rfcomm_dlc_put(d); 337 + 338 + if (list_empty(&s->dlcs)) 339 + rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); 365 340 366 341 rfcomm_session_put(s); 367 342 } ··· 598 567 599 568 BT_DBG("session %p sock %p", s, sock); 600 569 570 + setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s); 571 + 601 572 INIT_LIST_HEAD(&s->dlcs); 602 573 s->state = state; 603 574 s->sock = sock; ··· 631 598 if (state == BT_CONNECTED) 632 599 rfcomm_send_disc(s, 0); 633 600 601 + rfcomm_session_clear_timer(s); 634 602 sock_release(s->sock); 635 603 kfree(s); 636 604 ··· 673 639 __rfcomm_dlc_close(d, err); 674 640 } 675 641 642 + rfcomm_session_clear_timer(s); 676 643 rfcomm_session_put(s); 677 644 } 678 645 ··· 1914 1879 struct rfcomm_session *s; 1915 1880 s = list_entry(p, struct 
rfcomm_session, list); 1916 1881 1882 + if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1883 + s->state = BT_DISCONN; 1884 + rfcomm_send_disc(s, 0); 1885 + continue; 1886 + } 1887 + 1917 1888 if (s->state == BT_LISTEN) { 1918 1889 rfcomm_accept_connection(s); 1919 1890 continue; ··· 2121 2080 /* ---- Initialization ---- */ 2122 2081 static int __init rfcomm_init(void) 2123 2082 { 2124 - int ret; 2083 + int err; 2125 2084 2126 2085 l2cap_load(); 2127 2086 ··· 2129 2088 2130 2089 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2131 2090 if (IS_ERR(rfcomm_thread)) { 2132 - ret = PTR_ERR(rfcomm_thread); 2133 - goto out_thread; 2091 + err = PTR_ERR(rfcomm_thread); 2092 + goto unregister; 2134 2093 } 2135 2094 2136 2095 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2137 2096 BT_ERR("Failed to create RFCOMM info file"); 2138 2097 2139 - ret = rfcomm_init_ttys(); 2140 - if (ret) 2141 - goto out_tty; 2098 + err = rfcomm_init_ttys(); 2099 + if (err < 0) 2100 + goto stop; 2142 2101 2143 - ret = rfcomm_init_sockets(); 2144 - if (ret) 2145 - goto out_sock; 2102 + err = rfcomm_init_sockets(); 2103 + if (err < 0) 2104 + goto cleanup; 2146 2105 2147 2106 BT_INFO("RFCOMM ver %s", VERSION); 2148 2107 2149 2108 return 0; 2150 2109 2151 - out_sock: 2110 + cleanup: 2152 2111 rfcomm_cleanup_ttys(); 2153 - out_tty: 2112 + 2113 + stop: 2154 2114 kthread_stop(rfcomm_thread); 2155 - out_thread: 2115 + 2116 + unregister: 2156 2117 hci_unregister_cb(&rfcomm_cb); 2157 2118 2158 - return ret; 2119 + return err; 2159 2120 } 2160 2121 2161 2122 static void __exit rfcomm_exit(void)
+34 -15
net/bluetooth/sco.c
··· 359 359 sock_put(sk); 360 360 } 361 361 362 - /* Close socket. 363 - * Must be called on unlocked socket. 364 - */ 365 - static void sco_sock_close(struct sock *sk) 362 + static void __sco_sock_close(struct sock *sk) 366 363 { 367 - struct sco_conn *conn; 368 - 369 - sco_sock_clear_timer(sk); 370 - 371 - lock_sock(sk); 372 - 373 - conn = sco_pi(sk)->conn; 374 - 375 - BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket); 364 + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); 376 365 377 366 switch (sk->sk_state) { 378 367 case BT_LISTEN: ··· 379 390 sock_set_flag(sk, SOCK_ZAPPED); 380 391 break; 381 392 } 393 + } 382 394 395 + /* Must be called on unlocked socket. */ 396 + static void sco_sock_close(struct sock *sk) 397 + { 398 + sco_sock_clear_timer(sk); 399 + lock_sock(sk); 400 + __sco_sock_close(sk); 383 401 release_sock(sk); 384 - 385 402 sco_sock_kill(sk); 386 403 } 387 404 ··· 743 748 return err; 744 749 } 745 750 751 + static int sco_sock_shutdown(struct socket *sock, int how) 752 + { 753 + struct sock *sk = sock->sk; 754 + int err = 0; 755 + 756 + BT_DBG("sock %p, sk %p", sock, sk); 757 + 758 + if (!sk) 759 + return 0; 760 + 761 + lock_sock(sk); 762 + if (!sk->sk_shutdown) { 763 + sk->sk_shutdown = SHUTDOWN_MASK; 764 + sco_sock_clear_timer(sk); 765 + __sco_sock_close(sk); 766 + 767 + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 768 + err = bt_sock_wait_state(sk, BT_CLOSED, 769 + sk->sk_lingertime); 770 + } 771 + release_sock(sk); 772 + return err; 773 + } 774 + 746 775 static int sco_sock_release(struct socket *sock) 747 776 { 748 777 struct sock *sk = sock->sk; ··· 988 969 .ioctl = bt_sock_ioctl, 989 970 .mmap = sock_no_mmap, 990 971 .socketpair = sock_no_socketpair, 991 - .shutdown = sock_no_shutdown, 972 + .shutdown = sco_sock_shutdown, 992 973 .setsockopt = sco_sock_setsockopt, 993 974 .getsockopt = sco_sock_getsockopt 994 975 };