Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
fork
Configure Feed
Select the types of activity you want to include in your feed.
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57#include <net/bluetooth/smp.h>
58
/* Module-wide switch: when nonzero, ERTM and streaming modes are not
 * offered as supported (see l2cap_mode_supported()). */
int disable_ertm;

/* Locally advertised L2CAP feature mask and fixed-channel bitmap. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

/* Global list of every L2CAP channel, guarded by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
66
/* Forward declarations for helpers defined later in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76
77/* ---- L2CAP channels ---- */
78
/* Take a reference on a channel; paired with chan_put(). */
static inline void chan_hold(struct l2cap_chan *c)
{
	atomic_inc(&c->refcnt);
}
83
/* Drop a channel reference; frees the channel when the last one goes. */
static inline void chan_put(struct l2cap_chan *c)
{
	if (atomic_dec_and_test(&c->refcnt))
		kfree(c);
}
89
90static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
91{
92 struct l2cap_chan *c;
93
94 list_for_each_entry(c, &conn->chan_l, list) {
95 if (c->dcid == cid)
96 return c;
97 }
98 return NULL;
99
100}
101
102static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103{
104 struct l2cap_chan *c;
105
106 list_for_each_entry(c, &conn->chan_l, list) {
107 if (c->scid == cid)
108 return c;
109 }
110 return NULL;
111}
112
/* Find channel with given SCID.
 * On success the channel's socket is returned bh-locked; the caller
 * must bh_unlock_sock() it when done. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
126
127static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128{
129 struct l2cap_chan *c;
130
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
133 return c;
134 }
135 return NULL;
136}
137
/* Find channel by signalling command identifier.
 * On success the channel's socket is returned bh-locked; the caller
 * must bh_unlock_sock() it when done. */
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
149
150static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151{
152 struct l2cap_chan *c;
153
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
156 goto found;
157 }
158
159 c = NULL;
160found:
161 return c;
162}
163
/* Bind @chan to PSM @psm on local address @src.
 * A zero @psm requests automatic allocation from the dynamic range
 * 0x1001-0x10ff (odd values only, per the stride of 2).
 * Returns 0 on success, -EADDRINUSE if the PSM is taken, or -EINVAL
 * if no dynamic PSM is free. */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock_bh(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock_bh(&chan_list_lock);
	return err;
}
196
/* Bind @chan to a fixed source CID. Always succeeds (returns 0). */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock_bh(&chan_list_lock);

	chan->scid = scid;

	write_unlock_bh(&chan_list_lock);

	return 0;
}
207
208static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209{
210 u16 cid = L2CAP_CID_DYN_START;
211
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
215 }
216
217 return 0;
218}
219
220static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221{
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
223
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 chan_hold(chan);
226}
227
/* Cancel @timer if pending; on successful deletion drop the channel
 * reference that l2cap_set_timer() took when arming it. */
static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
{
	BT_DBG("chan %p state %d", chan, chan->state);

	if (timer_pending(timer) && del_timer(timer))
		chan_put(chan);
}
235
/* Move @chan to @state and notify the channel owner via its
 * state_change callback. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
241
/* chan_timer expiry handler.
 * Closes the channel with an error code appropriate to its state and
 * drops the reference taken by l2cap_set_timer(). If the socket is
 * currently owned by user context, re-arms the timer and retries later. */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	/* Established/configuring channels and unauthenticated connect
	 * attempts report ECONNREFUSED; everything else a plain timeout. */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	chan_put(chan);
}
275
/* Allocate a new channel for @sk, link it into the global channel list
 * and initialize its timer and refcount (initial ref owned by caller).
 * Returns NULL on allocation failure. */
struct l2cap_chan *l2cap_chan_create(struct sock *sk)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	chan->sk = sk;

	write_lock_bh(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock_bh(&chan_list_lock);

	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	return chan;
}
298
/* Unlink @chan from the global channel list and drop the creation
 * reference (the channel is freed once all references are gone). */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);

	chan_put(chan);
}
307
/* Attach @chan to @conn, assigning CIDs and default MTU according to
 * the channel type, and add it to the connection's channel list with
 * its own reference. Callers must hold conn->chan_lock for writing. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	/* NOTE(review): 0x13 looks like the HCI "remote user terminated
	 * connection" disconnect reason — confirm against the HCI spec. */
	conn->disc_reason = 0x13;

	chan->conn = conn;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Reference held by the connection's channel list. */
	chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
344
/* Delete channel.
 * Must be called on the locked socket. Detaches @chan from its
 * connection (dropping the list reference and the hci_conn ref),
 * marks the socket zapped, wakes the owner/parent, and — if the
 * channel had completed configuration in both directions — flushes
 * its transmit queues and ERTM state. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* Drop the reference taken by __l2cap_chan_add(). */
		chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Nothing further to tear down unless configuration completed
	 * in both directions. */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		/* Free any pending selective-reject entries. */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
401
/* Tear down every not-yet-accepted channel queued on listening socket
 * @parent, closing each with ECONNRESET. */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
		__clear_chan_timer(chan);
		lock_sock(sk);
		l2cap_chan_close(chan, ECONNRESET);
		release_sock(sk);
		chan->ops->close(chan->data);
	}
}
418
/* Close @chan with error @reason, acting according to its state:
 * listening sockets drain their accept queue; connected ACL channels
 * start a disconnect handshake; half-open incoming connections are
 * refused; everything else is deleted outright.
 * Must be called on the locked socket. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Graceful disconnect: send the request and wait
			 * (bounded by the channel timer) for the response. */
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Refuse the pending incoming connection. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
478
479static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480{
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
487 default:
488 return HCI_AT_NO_BONDING;
489 }
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
493
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
496 else
497 return HCI_AT_NO_BONDING;
498 } else {
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
504 default:
505 return HCI_AT_NO_BONDING;
506 }
507 }
508}
509
/* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link; returns the hci_conn_security() result. */
static inline int l2cap_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
520
/* Allocate the next signalling command identifier for @conn,
 * cycling through 1..128 under conn->lock. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
542
/* Build an L2CAP signalling command and queue it on the ACL link.
 * Silently drops the command if the skb cannot be built. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use a non-flushable packet boundary when the controller
	 * supports it. */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;

	hci_send_acl(conn->hcon, skb, flags);
}
562
/* Build and transmit an ERTM supervisory frame carrying @control.
 * Sets the F-bit and/or P-bit if the corresponding SEND flags are
 * pending on the channel, and appends an FCS when CRC16 is in use.
 * No-op unless the channel is connected. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (chan->state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume pending final/poll flags into this frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= L2CAP_CTRL_FINAL;

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= L2CAP_CTRL_POLL;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
611
/* Send a Receiver-Ready, or Receiver-Not-Ready when the local side is
 * busy (in which case the RNR-sent flag is recorded), acknowledging
 * up to chan->buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
{
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(chan, control);
}
624
/* True when no connect request is outstanding on @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
629
/* Kick off channel establishment: once the remote feature mask is
 * known (and security clears), send a Connection Request; otherwise
 * first issue an Information Request for the feature mask. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange already in flight; wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm  = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
664
665static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
666{
667 u32 local_feat_mask = l2cap_feat_mask;
668 if (!disable_ertm)
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
670
671 switch (mode) {
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 default:
677 return 0x00;
678 }
679}
680
/* Send a Disconnection Request for @chan, cancel its ERTM timers,
 * move it to BT_DISCONN and record @err on the socket. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}
705
706/* ---- L2CAP connections ---- */
/* Progress all connection-oriented channels on @conn:
 * BT_CONNECT channels (outgoing) get a Connection Request once
 * security clears; BT_CONNECT2 channels (incoming) get a Connection
 * Response and, on success, the first Configure Request. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
				read_unlock(&conn->chan_lock);
				l2cap_chan_close(chan, ECONNRESET);
				read_lock(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(chan->scid);
			req.psm  = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					/* Leave the decision to userspace:
					 * report pending and wake the
					 * listening parent. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
799
800/* Find socket with cid and source bdaddr.
801 * Returns closest match, locked.
802 */
803static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
804{
805 struct l2cap_chan *c, *c1 = NULL;
806
807 read_lock(&chan_list_lock);
808
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
811
812 if (state && c->state != state)
813 continue;
814
815 if (c->scid == cid) {
816 /* Exact match. */
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
819 return c;
820 }
821
822 /* Closest match */
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
824 c1 = c;
825 }
826 }
827
828 read_unlock(&chan_list_lock);
829
830 return c1;
831}
832
/* Handle an incoming LE connection: if a socket is listening on the
 * LE data CID, spawn a child channel, attach it to @conn and signal
 * the listening parent. Silently returns if no listener, the backlog
 * is full, or the child cannot be created. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	/* Fall-through on success is intentional: the parent socket is
	 * unlocked on every path. */
	bh_unlock_sock(parent);
}
883
/* Mark the channel behind @sk as fully connected: reset config state,
 * stop the channel timer and wake the socket (and its parent, if any). */
static void l2cap_chan_ready(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	chan->conf_state = 0;
	__clear_chan_timer(chan);

	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);
}
900
/* Link-level connection is up: handle LE setup/security, then walk all
 * channels — non-connection-oriented ones become connected at once,
 * outgoing ones start the L2CAP connect sequence. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to the LE listener path. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP with the pending security level. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
937
/* Notify sockets that we cannot guaranty reliability anymore */
/* Record @err on every channel that asked for reliable delivery. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		if (chan->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&conn->chan_lock);
}
956
/* Information Request timed out: give up on the feature exchange and
 * proceed with connection setup using whatever we know. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
966
/* Tear down the L2CAP state of @hcon: delete every channel with @err,
 * cancel pending timers, destroy any SMP context and free the
 * connection structure. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		chan->ops->close(chan->data);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
		del_timer(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1000
/* LE security procedure timed out: drop the whole connection. */
static void security_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1007
/* Create (or return the existing) L2CAP connection state for @hcon.
 * Returns the existing conn unchanged when one is present or when
 * @status is nonzero; returns NULL on allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links use the controller's LE MTU when it advertises one. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links track security timeouts; BR/EDR links track the
	 * information-request exchange. */
	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* NOTE(review): 0x13 looks like the HCI "remote user terminated
	 * connection" disconnect reason — confirm against the HCI spec. */
	conn->disc_reason = 0x13;

	return conn;
}
1050
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
1057
1058/* ---- Socket interface ---- */
1059
1060/* Find socket with psm and source bdaddr.
1061 * Returns closest match.
1062 */
1063static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1064{
1065 struct l2cap_chan *c, *c1 = NULL;
1066
1067 read_lock(&chan_list_lock);
1068
1069 list_for_each_entry(c, &chan_list, global_l) {
1070 struct sock *sk = c->sk;
1071
1072 if (state && c->state != state)
1073 continue;
1074
1075 if (c->psm == psm) {
1076 /* Exact match. */
1077 if (!bacmp(&bt_sk(sk)->src, src)) {
1078 read_unlock(&chan_list_lock);
1079 return c;
1080 }
1081
1082 /* Closest match */
1083 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1084 c1 = c;
1085 }
1086 }
1087
1088 read_unlock(&chan_list_lock);
1089
1090 return c1;
1091}
1092
/* Initiate an outgoing connection for @chan to the peer address stored
 * on its socket: route to a local adapter, create the ACL or LE link,
 * attach the channel and start the L2CAP connect sequence.
 * Returns 0 on success or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else uses ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: non-connection-oriented channels are
		 * connected as soon as security clears; others start the
		 * full connect handshake. */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1158
/* Block (interruptibly) until every outstanding ERTM frame on the
 * channel behind @sk has been acknowledged or the connection goes away.
 * Drops and re-takes the socket lock around each sleep.
 * Returns 0, a socket error, or a signal-derived errno. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1190
/* ERTM monitor timer: the peer has not responded to our poll. Either
 * give up (max retransmissions reached) or poll again with RR/RNR. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1211
/* ERTM retransmission timer: no ack arrived in time. Start the poll
 * sequence: send RR/RNR with the P-bit and wait for the F-bit reply
 * under the monitor timer. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1228
/* Free transmitted frames from the head of tx_q up to (but not
 * including) the expected-ack sequence number, and stop the
 * retransmission timer once nothing remains unacknowledged. */
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
			chan->unacked_frames) {
		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}
1247
/* Hand a fully built frame to the HCI layer, choosing a non-flushable
 * packet boundary when the channel forbids flushing and the controller
 * supports it. */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);

	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;
	hci_send_acl(hcon, skb, flags);
}
1263
/* Streaming mode: drain tx_q, stamping each frame with the next TX
 * sequence number (mod 64) and recomputing the FCS when in use. No
 * retransmission state is kept. */
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own trailer. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1284
1285static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1286{
1287 struct sk_buff *skb, *tx_skb;
1288 u16 control, fcs;
1289
1290 skb = skb_peek(&chan->tx_q);
1291 if (!skb)
1292 return;
1293
1294 do {
1295 if (bt_cb(skb)->tx_seq == tx_seq)
1296 break;
1297
1298 if (skb_queue_is_last(&chan->tx_q, skb))
1299 return;
1300
1301 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1302
1303 if (chan->remote_max_tx &&
1304 bt_cb(skb)->retries == chan->remote_max_tx) {
1305 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1306 return;
1307 }
1308
1309 tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 bt_cb(skb)->retries++;
1311 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1312 control &= L2CAP_CTRL_SAR;
1313
1314 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1315 control |= L2CAP_CTRL_FINAL;
1316
1317 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1318 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1319
1320 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1321
1322 if (chan->fcs == L2CAP_FCS_CRC16) {
1323 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1324 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1325 }
1326
1327 l2cap_do_send(chan, tx_skb);
1328}
1329
1330static int l2cap_ertm_send(struct l2cap_chan *chan)
1331{
1332 struct sk_buff *skb, *tx_skb;
1333 u16 control, fcs;
1334 int nsent = 0;
1335
1336 if (chan->state != BT_CONNECTED)
1337 return -ENOTCONN;
1338
1339 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1340
1341 if (chan->remote_max_tx &&
1342 bt_cb(skb)->retries == chan->remote_max_tx) {
1343 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1344 break;
1345 }
1346
1347 tx_skb = skb_clone(skb, GFP_ATOMIC);
1348
1349 bt_cb(skb)->retries++;
1350
1351 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1352 control &= L2CAP_CTRL_SAR;
1353
1354 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1355 control |= L2CAP_CTRL_FINAL;
1356
1357 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1358 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1359 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1360
1361
1362 if (chan->fcs == L2CAP_FCS_CRC16) {
1363 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1364 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1365 }
1366
1367 l2cap_do_send(chan, tx_skb);
1368
1369 __set_retrans_timer(chan);
1370
1371 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1372 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1373
1374 if (bt_cb(skb)->retries == 1)
1375 chan->unacked_frames++;
1376
1377 chan->frames_sent++;
1378
1379 if (skb_queue_is_last(&chan->tx_q, skb))
1380 chan->tx_send_head = NULL;
1381 else
1382 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1383
1384 nsent++;
1385 }
1386
1387 return nsent;
1388}
1389
1390static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1391{
1392 int ret;
1393
1394 if (!skb_queue_empty(&chan->tx_q))
1395 chan->tx_send_head = chan->tx_q.next;
1396
1397 chan->next_tx_seq = chan->expected_ack_seq;
1398 ret = l2cap_ertm_send(chan);
1399 return ret;
1400}
1401
/* Acknowledge received I-frames.
 *
 * When locally busy, an RNR S-frame is sent and remembered via
 * CONN_RNR_SENT.  Otherwise pending outgoing I-frames are flushed
 * first, since their piggybacked ReqSeq already acknowledges; only if
 * nothing was sent is an explicit RR S-frame emitted.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	/* A positive return means I-frames went out carrying the ack. */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);
}
1421
1422static void l2cap_send_srejtail(struct l2cap_chan *chan)
1423{
1424 struct srej_list *tail;
1425 u16 control;
1426
1427 control = L2CAP_SUPER_SELECT_REJECT;
1428 control |= L2CAP_CTRL_FINAL;
1429
1430 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1431 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1432
1433 l2cap_send_sframe(chan, control);
1434}
1435
/* Copy "len" bytes of user data from msg's iovec into skb.
 *
 * The first "count" bytes go into skb's linear area (which already
 * holds the L2CAP header); the remainder is split into conn->mtu sized
 * continuation fragments chained on skb's frag_list.  Returns the
 * number of bytes copied, or a negative error.  On failure the caller
 * frees skb, which also releases any fragments already linked.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1467
/* Build a single connectionless PDU: basic L2CAP header plus a 2-byte
 * PSM field, followed by the user data from msg.  Data beyond the ACL
 * MTU is chained as continuation fragments.  Returns the skb or an
 * ERR_PTR on failure. */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Linear part is capped at the ACL MTU; the rest is fragmented
	 * inside l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1497
/* Build a single basic-mode PDU: plain L2CAP header followed by the
 * user data from msg.  Data beyond the ACL MTU is chained as
 * continuation fragments.  Returns the skb or an ERR_PTR on failure. */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1526
/* Build one I-frame PDU for ERTM/streaming mode.
 *
 * Layout: basic L2CAP header, 2-byte control field, an optional 2-byte
 * SDU length (present only in the first frame of a segmented SDU, i.e.
 * when sdulen != 0), the payload, and room for a trailing 2-byte FCS
 * when CRC16 is in use.  The FCS placeholder is zeroed here; the real
 * value is filled in at transmit time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SDU length field (SAR start frame) */

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve the FCS slot; computed later at send time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1574
/* Segment an SDU larger than the remote MPS into a chain of I-frames:
 * a START frame carrying the total SDU length, CONTINUE frames, and a
 * final END frame.  The frames are built on a private queue first so a
 * mid-stream allocation failure leaves chan->tx_q untouched, then
 * spliced onto the transmit queue.  Returns the number of payload bytes
 * queued, or a negative error.
 */
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* First frame: SAR start, carries the full SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1619
/* Entry point for transmitting an SDU on a channel.
 *
 * Connectionless channels send a single PDU immediately.  Basic mode
 * sends one PDU after checking the outgoing MTU.  ERTM/streaming queue
 * either one unsegmented I-frame or a segmented chain, then kick the
 * transmit machinery appropriate for the mode.  Returns the number of
 * bytes accepted (len) or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* Leave frames queued while waiting for the F-bit from a
		 * busy remote side; they go out on recovery. */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		/* NOTE(review): the message says "state" but prints the
		 * channel mode — consider renaming in a follow-up. */
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1698
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone allocation failure is non-fatal: that raw socket
		 * simply misses this frame. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* The recv callback owns nskb on success; free it if the
		 * socket refused the frame. */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1725
1726/* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header (on the LE or
 * BR/EDR signalling CID), command header and dlen bytes of payload.
 * Payload beyond the ACL MTU is chained as continuation fragments.
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling travels on a fixed CID that depends on link type. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb together with any fragments already linked. */
	kfree_skb(skb);
	return NULL;
}
1789
/* Decode one configuration option TLV at *ptr, advancing *ptr past it.
 *
 * Returns the total number of bytes consumed.  The value is returned
 * in *val: 1/2/4-byte options are decoded as little-endian integers;
 * any other length yields a pointer to the raw option payload.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer into the
		 * buffer instead of a decoded integer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1822
1823static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1824{
1825 struct l2cap_conf_opt *opt = *ptr;
1826
1827 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1828
1829 opt->type = type;
1830 opt->len = len;
1831
1832 switch (len) {
1833 case 1:
1834 *((u8 *) opt->val) = val;
1835 break;
1836
1837 case 2:
1838 put_unaligned_le16(val, opt->val);
1839 break;
1840
1841 case 4:
1842 put_unaligned_le32(val, opt->val);
1843 break;
1844
1845 default:
1846 memcpy(opt->val, (void *) val, len);
1847 break;
1848 }
1849
1850 *ptr += L2CAP_CONF_OPT_SIZE + len;
1851}
1852
1853static void l2cap_ack_timeout(unsigned long arg)
1854{
1855 struct l2cap_chan *chan = (void *) arg;
1856
1857 bh_lock_sock(chan->sk);
1858 l2cap_send_ack(chan);
1859 bh_unlock_sock(chan->sk);
1860}
1861
/* Initialise ERTM machinery for a channel: reset sequence-number
 * bookkeeping, arm the retransmission/monitor/ack timers, and prepare
 * the SREJ queue and list.  Frames landing on the socket backlog are
 * routed through l2cap_ertm_data_rcv(). */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);


	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1885
1886static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1887{
1888 switch (mode) {
1889 case L2CAP_MODE_STREAMING:
1890 case L2CAP_MODE_ERTM:
1891 if (l2cap_mode_supported(mode, remote_feat_mask))
1892 return mode;
1893 /* fall through */
1894 default:
1895 return L2CAP_MODE_BASIC;
1896 }
1897}
1898
/* Build our outgoing Configuration Request into "data".
 *
 * On the first exchange the desired mode is (re)selected against the
 * remote feature mask.  The request then carries a non-default MTU
 * option plus the RFC (and possibly FCS) options appropriate for the
 * chosen mode.  Returns the number of bytes written to "data".
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)negotiated on the very first request. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2 devices stick to their configured mode. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Send an explicit basic-mode RFC only when the remote
		 * supports ERTM or streaming at all. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = chan->tx_win;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so one I-frame fits in an ACL packet;
		 * the 10 bytes presumably cover header/control/sdulen/FCS
		 * overhead — TODO confirm exact breakdown. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1995
1996static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1997{
1998 struct l2cap_conf_rsp *rsp = data;
1999 void *ptr = rsp->data;
2000 void *req = chan->conf_req;
2001 int len = chan->conf_len;
2002 int type, hint, olen;
2003 unsigned long val;
2004 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2005 u16 mtu = L2CAP_DEFAULT_MTU;
2006 u16 result = L2CAP_CONF_SUCCESS;
2007
2008 BT_DBG("chan %p", chan);
2009
2010 while (len >= L2CAP_CONF_OPT_SIZE) {
2011 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2012
2013 hint = type & L2CAP_CONF_HINT;
2014 type &= L2CAP_CONF_MASK;
2015
2016 switch (type) {
2017 case L2CAP_CONF_MTU:
2018 mtu = val;
2019 break;
2020
2021 case L2CAP_CONF_FLUSH_TO:
2022 chan->flush_to = val;
2023 break;
2024
2025 case L2CAP_CONF_QOS:
2026 break;
2027
2028 case L2CAP_CONF_RFC:
2029 if (olen == sizeof(rfc))
2030 memcpy(&rfc, (void *) val, olen);
2031 break;
2032
2033 case L2CAP_CONF_FCS:
2034 if (val == L2CAP_FCS_NONE)
2035 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2036
2037 break;
2038
2039 default:
2040 if (hint)
2041 break;
2042
2043 result = L2CAP_CONF_UNKNOWN;
2044 *((u8 *) ptr++) = type;
2045 break;
2046 }
2047 }
2048
2049 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2050 goto done;
2051
2052 switch (chan->mode) {
2053 case L2CAP_MODE_STREAMING:
2054 case L2CAP_MODE_ERTM:
2055 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2056 chan->mode = l2cap_select_mode(rfc.mode,
2057 chan->conn->feat_mask);
2058 break;
2059 }
2060
2061 if (chan->mode != rfc.mode)
2062 return -ECONNREFUSED;
2063
2064 break;
2065 }
2066
2067done:
2068 if (chan->mode != rfc.mode) {
2069 result = L2CAP_CONF_UNACCEPT;
2070 rfc.mode = chan->mode;
2071
2072 if (chan->num_conf_rsp == 1)
2073 return -ECONNREFUSED;
2074
2075 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2076 sizeof(rfc), (unsigned long) &rfc);
2077 }
2078
2079
2080 if (result == L2CAP_CONF_SUCCESS) {
2081 /* Configure output options and let the other side know
2082 * which ones we don't like. */
2083
2084 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2085 result = L2CAP_CONF_UNACCEPT;
2086 else {
2087 chan->omtu = mtu;
2088 set_bit(CONF_MTU_DONE, &chan->conf_state);
2089 }
2090 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2091
2092 switch (rfc.mode) {
2093 case L2CAP_MODE_BASIC:
2094 chan->fcs = L2CAP_FCS_NONE;
2095 set_bit(CONF_MODE_DONE, &chan->conf_state);
2096 break;
2097
2098 case L2CAP_MODE_ERTM:
2099 chan->remote_tx_win = rfc.txwin_size;
2100 chan->remote_max_tx = rfc.max_transmit;
2101
2102 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2103 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2104
2105 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2106
2107 rfc.retrans_timeout =
2108 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2109 rfc.monitor_timeout =
2110 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2111
2112 set_bit(CONF_MODE_DONE, &chan->conf_state);
2113
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2115 sizeof(rfc), (unsigned long) &rfc);
2116
2117 break;
2118
2119 case L2CAP_MODE_STREAMING:
2120 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2121 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2122
2123 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2124
2125 set_bit(CONF_MODE_DONE, &chan->conf_state);
2126
2127 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2128 sizeof(rfc), (unsigned long) &rfc);
2129
2130 break;
2131
2132 default:
2133 result = L2CAP_CONF_UNACCEPT;
2134
2135 memset(&rfc, 0, sizeof(rfc));
2136 rfc.mode = chan->mode;
2137 }
2138
2139 if (result == L2CAP_CONF_SUCCESS)
2140 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2141 }
2142 rsp->scid = cpu_to_le16(chan->dcid);
2143 rsp->result = cpu_to_le16(result);
2144 rsp->flags = cpu_to_le16(0x0000);
2145
2146 return ptr - data;
2147}
2148
/* Process the peer's Configuration Response and build our follow-up
 * request into "data".
 *
 * Options the peer did not accept are adjusted (MTU clamped to the
 * minimum, peer's RFC adopted) and re-proposed.  On success the
 * negotiated ERTM/streaming timeouts and MPS are recorded on the
 * channel.  Returns the length of the new request, or -ECONNREFUSED
 * when the response is incompatible with our mode.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* STATE2 devices refuse any mode change. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2216
/* Build a minimal Configuration Response (result + flags, no options)
 * into "data".  Returns the number of bytes written. */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}
2230
/* Complete a connection whose acceptance was deferred by userspace
 * (defer_setup): send the pending Connect Response and, unless one is
 * already in flight, our first Configuration Request. */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only one outstanding Configuration Request at a time. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2251
/* Extract the RFC option from a Configuration Response and record the
 * negotiated ERTM/streaming parameters (timeouts, MPS) on the channel.
 * Basic-mode channels have nothing to record and return immediately. */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
2295
2296static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2297{
2298 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2299
2300 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2301 return 0;
2302
2303 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2304 cmd->ident == conn->info_ident) {
2305 del_timer(&conn->info_timer);
2306
2307 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2308 conn->info_ident = 0;
2309
2310 l2cap_conn_start(conn);
2311 }
2312
2313 return 0;
2314}
2315
/* Handle an incoming Connect Request.
 *
 * Looks up a listening channel for the requested PSM, performs security
 * and accept-backlog checks, creates the child channel, and replies
 * with a Connect Response (success, pending, or a rejection).  When the
 * remote feature mask is still unknown, an Information Request is sent
 * and the connection is left pending.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* NOTE(review): 0x05 presumably maps to "authentication
		 * failure" as HCI disconnect reason — confirm. */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides; wake the listener. */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask still unknown: answer pending and ask. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2449
/* Handle an incoming Connect Response.
 *
 * On success the channel moves to BT_CONFIG and our first Configuration
 * Request goes out; a pending result just flags the channel; any other
 * result tears the channel down (deferred when userspace holds the
 * socket lock).
 *
 * NOTE(review): there is no bh_lock_sock() here — the lookup helpers
 * (l2cap_get_chan_by_scid/_by_ident) appear to return with the socket
 * locked, paired with the bh_unlock_sock() below; confirm against their
 * definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		/* No source CID yet: match by the command identifier. */
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only one outstanding Configuration Request at a time. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2512
2513static inline void set_default_fcs(struct l2cap_chan *chan)
2514{
2515 /* FCS is enabled only in ERTM or streaming mode, if one or both
2516 * sides request it.
2517 */
2518 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2519 chan->fcs = L2CAP_FCS_NONE;
2520 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2521 chan->fcs = L2CAP_FCS_CRC16;
2522}
2523
/* Handle an incoming L2CAP Configure Request.
 *
 * A configuration may be split across several requests: the
 * continuation flag (bit 0 of "flags") is set on all fragments but
 * the last.  Fragments accumulate in chan->conf_req until the final
 * one arrives and the whole set is parsed and answered.  When both
 * config directions are done the channel becomes BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup appears to return with the channel's
	 * socket bh-locked; released at "unlock" below. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configuration is only legal while connecting or
		 * configuring; answer with an invalid-CID reject. */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable options: give up on the channel. */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our outgoing direction is not settled yet; wait for it. */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Remote configured first: send our own Configure Request now
	 * if we have not already done so. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2619
2620static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2621{
2622 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2623 u16 scid, flags, result;
2624 struct l2cap_chan *chan;
2625 struct sock *sk;
2626 int len = cmd->len - sizeof(*rsp);
2627
2628 scid = __le16_to_cpu(rsp->scid);
2629 flags = __le16_to_cpu(rsp->flags);
2630 result = __le16_to_cpu(rsp->result);
2631
2632 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2633 scid, flags, result);
2634
2635 chan = l2cap_get_chan_by_scid(conn, scid);
2636 if (!chan)
2637 return 0;
2638
2639 sk = chan->sk;
2640
2641 switch (result) {
2642 case L2CAP_CONF_SUCCESS:
2643 l2cap_conf_rfc_get(chan, rsp->data, len);
2644 break;
2645
2646 case L2CAP_CONF_UNACCEPT:
2647 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2648 char req[64];
2649
2650 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2651 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2652 goto done;
2653 }
2654
2655 /* throw out any old stored conf requests */
2656 result = L2CAP_CONF_SUCCESS;
2657 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2658 req, &result);
2659 if (len < 0) {
2660 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2661 goto done;
2662 }
2663
2664 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2665 L2CAP_CONF_REQ, len, req);
2666 chan->num_conf_req++;
2667 if (result != L2CAP_CONF_SUCCESS)
2668 goto done;
2669 break;
2670 }
2671
2672 default:
2673 sk->sk_err = ECONNRESET;
2674 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2675 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2676 goto done;
2677 }
2678
2679 if (flags & 0x01)
2680 goto done;
2681
2682 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2683
2684 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2685 set_default_fcs(chan);
2686
2687 l2cap_state_change(chan, BT_CONNECTED);
2688 chan->next_tx_seq = 0;
2689 chan->expected_tx_seq = 0;
2690 skb_queue_head_init(&chan->tx_q);
2691 if (chan->mode == L2CAP_MODE_ERTM)
2692 l2cap_ertm_init(chan);
2693
2694 l2cap_chan_ready(sk);
2695 }
2696
2697done:
2698 bh_unlock_sock(sk);
2699 return 0;
2700}
2701
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges with a Disconnect Response and tears the channel
 * down.  If user space currently owns the socket lock the channel
 * is parked in BT_DISCONN and deleted later by the disconnect timer
 * instead of being removed here.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's destination CID is our source CID. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Echo the CID pair back, swapped to the peer's viewpoint. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	return 0;
}
2742
2743static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2744{
2745 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2746 u16 dcid, scid;
2747 struct l2cap_chan *chan;
2748 struct sock *sk;
2749
2750 scid = __le16_to_cpu(rsp->scid);
2751 dcid = __le16_to_cpu(rsp->dcid);
2752
2753 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2754
2755 chan = l2cap_get_chan_by_scid(conn, scid);
2756 if (!chan)
2757 return 0;
2758
2759 sk = chan->sk;
2760
2761 /* don't delete l2cap channel if sk is owned by user */
2762 if (sock_owned_by_user(sk)) {
2763 l2cap_state_change(chan,BT_DISCONN);
2764 __clear_chan_timer(chan);
2765 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2766 bh_unlock_sock(sk);
2767 return 0;
2768 }
2769
2770 l2cap_chan_del(chan, 0);
2771 bh_unlock_sock(sk);
2772
2773 chan->ops->close(chan->data);
2774 return 0;
2775}
2776
2777static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2778{
2779 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2780 u16 type;
2781
2782 type = __le16_to_cpu(req->type);
2783
2784 BT_DBG("type 0x%4.4x", type);
2785
2786 if (type == L2CAP_IT_FEAT_MASK) {
2787 u8 buf[8];
2788 u32 feat_mask = l2cap_feat_mask;
2789 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2790 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2791 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2792 if (!disable_ertm)
2793 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2794 | L2CAP_FEAT_FCS;
2795 put_unaligned_le32(feat_mask, rsp->data);
2796 l2cap_send_cmd(conn, cmd->ident,
2797 L2CAP_INFO_RSP, sizeof(buf), buf);
2798 } else if (type == L2CAP_IT_FIXED_CHAN) {
2799 u8 buf[12];
2800 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2801 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2802 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2803 memcpy(buf + 4, l2cap_fixed_chan, 8);
2804 l2cap_send_cmd(conn, cmd->ident,
2805 L2CAP_INFO_RSP, sizeof(buf), buf);
2806 } else {
2807 struct l2cap_info_rsp rsp;
2808 rsp.type = cpu_to_le16(type);
2809 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2810 l2cap_send_cmd(conn, cmd->ident,
2811 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2812 }
2813
2814 return 0;
2815}
2816
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the feature-mask / fixed-channel discovery exchange
 * and, once discovery is finished (or the peer refused), releases
 * pending channels via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query: treat discovery as done. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query the map
			 * before declaring discovery complete. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2869
2870static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2871 u16 to_multiplier)
2872{
2873 u16 max_latency;
2874
2875 if (min > max || min < 6 || max > 3200)
2876 return -EINVAL;
2877
2878 if (to_multiplier < 10 || to_multiplier > 3200)
2879 return -EINVAL;
2880
2881 if (max >= to_multiplier * 8)
2882 return -EINVAL;
2883
2884 max_latency = (to_multiplier * 8 / max) - 1;
2885 if (latency > 499 || latency > max_latency)
2886 return -EINVAL;
2887
2888 return 0;
2889}
2890
/* Handle an LE Connection Parameter Update Request.
 *
 * Only legal when we are master of the link.  Validates the
 * requested parameters, always answers with accept or reject, and
 * on accept asks the controller to apply the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* A slave must not be asked to update parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the parameters only after the response went out. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2932
2933static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2934 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2935{
2936 int err = 0;
2937
2938 switch (cmd->code) {
2939 case L2CAP_COMMAND_REJ:
2940 l2cap_command_rej(conn, cmd, data);
2941 break;
2942
2943 case L2CAP_CONN_REQ:
2944 err = l2cap_connect_req(conn, cmd, data);
2945 break;
2946
2947 case L2CAP_CONN_RSP:
2948 err = l2cap_connect_rsp(conn, cmd, data);
2949 break;
2950
2951 case L2CAP_CONF_REQ:
2952 err = l2cap_config_req(conn, cmd, cmd_len, data);
2953 break;
2954
2955 case L2CAP_CONF_RSP:
2956 err = l2cap_config_rsp(conn, cmd, data);
2957 break;
2958
2959 case L2CAP_DISCONN_REQ:
2960 err = l2cap_disconnect_req(conn, cmd, data);
2961 break;
2962
2963 case L2CAP_DISCONN_RSP:
2964 err = l2cap_disconnect_rsp(conn, cmd, data);
2965 break;
2966
2967 case L2CAP_ECHO_REQ:
2968 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2969 break;
2970
2971 case L2CAP_ECHO_RSP:
2972 break;
2973
2974 case L2CAP_INFO_REQ:
2975 err = l2cap_information_req(conn, cmd, data);
2976 break;
2977
2978 case L2CAP_INFO_RSP:
2979 err = l2cap_information_rsp(conn, cmd, data);
2980 break;
2981
2982 default:
2983 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2984 err = -EINVAL;
2985 break;
2986 }
2987
2988 return err;
2989}
2990
2991static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2992 struct l2cap_cmd_hdr *cmd, u8 *data)
2993{
2994 switch (cmd->code) {
2995 case L2CAP_COMMAND_REJ:
2996 return 0;
2997
2998 case L2CAP_CONN_PARAM_UPDATE_REQ:
2999 return l2cap_conn_param_update_req(conn, cmd, data);
3000
3001 case L2CAP_CONN_PARAM_UPDATE_RSP:
3002 return 0;
3003
3004 default:
3005 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3006 return -EINVAL;
3007 }
3008}
3009
/* Process the signaling channel payload: a sequence of
 * (code, ident, len) command headers, each followed by len bytes of
 * command data.  Parsing stops at the first malformed command; a
 * handler error is answered with a generic Command Reject.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a look at the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* ident 0 is reserved; a length running past the buffer
		 * means the packet is truncated or corrupted. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3056
/* Verify the 16-bit FCS trailing an ERTM/streaming frame.
 *
 * Trims the two FCS bytes off the skb, then reads them from just
 * past the new tail (skb_trim() only shrinks skb->len; the bytes
 * are still in the buffer) and recomputes the CRC over the L2CAP
 * header -- which sits hdr_size bytes before skb->data here -- plus
 * the remaining payload.  Returns 0 on match or when FCS is
 * disabled for the channel, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;	/* basic header + control field */

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3072
/* Answer a poll (P-bit) from the remote: send RNR when locally
 * busy, flush pending I-frames, and make sure at least one frame
 * acknowledging the current receive state goes out -- a plain RR if
 * nothing else was sent.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing above carried the acknowledgement: send an RR. */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
3098
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue sorted by each frame's tx_seq distance from
 * buffer_seq in the modulo-64 sequence space.  Returns -EINVAL for
 * a duplicate tx_seq, 0 after a successful insertion.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from the receive window base. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame sorting after us: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Sorts after everything currently queued. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
3140
/* Append new_frag to the SDU being reassembled in skb via the skb
 * frag_list.  *last_frag caches the current list tail so each
 * append is O(1); it is advanced to the new tail.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Keep the head skb's accounting in sync with the new data. */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
3159
/* Feed one received I-frame into SDU reassembly according to its
 * SAR bits.  Unsegmented SDUs go straight to the channel's recv
 * callback; START/CONTINUE/END fragments are chained onto chan->sdu
 * until the length announced in the START frame is reached.  On any
 * error both the partial SDU and the offending skb are freed.
 * Ownership of skb passes to this function (delivered, queued, or
 * freed).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int err = -EINVAL;	/* any unexpected SAR transition is an error */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (chan->sdu)
			break;	/* reassembly already in progress */

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SDU_START:
		if (chan->sdu)
			break;	/* nested START is invalid */

		/* The first two payload bytes carry the SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START fragment must not already hold the full SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;	/* now owned by chan->sdu */
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!chan->sdu)
			break;	/* CONTINUE without START */

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Growing past the announced length is an error. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SDU_END:
		if (!chan->sdu)
			break;	/* END without START */

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final size must match exactly what START announced. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the fragment (if still ours) and any partially
		 * reassembled SDU. */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
3240
/* Enter the ERTM local-busy state: tell the remote to stop sending
 * by emitting an RNR S-frame and remember that we did so.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);

	set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* No point batching acknowledgements while we refuse frames. */
	__clear_ack_timer(chan);
}
3257
/* Leave the ERTM local-busy state.  If an RNR was previously sent,
 * poll the remote with RR+P so it resumes transmission, and arm the
 * monitor timer while waiting for the F-bit answer.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	/* Switch from retransmission to monitor timeout while the
	 * poll is outstanding. */
	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
3281
3282void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3283{
3284 if (chan->mode == L2CAP_MODE_ERTM) {
3285 if (busy)
3286 l2cap_ertm_enter_local_busy(chan);
3287 else
3288 l2cap_ertm_exit_local_busy(chan);
3289 }
3290}
3291
/* After a missing frame finally arrived, drain consecutive
 * in-order frames from the SREJ queue starting at tx_seq and
 * deliver each to SDU reassembly, until the next gap (or local
 * busy) stops the walk.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		if (bt_cb(skb)->tx_seq != tx_seq)
			break;	/* still a hole before this frame */

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3318
/* The remote answered one of our SREJs.  Walk the pending SREJ
 * list: the entry matching tx_seq is satisfied and removed; each
 * entry visited before it has its SREJ re-sent and is rotated to
 * the tail so list order keeps tracking the outstanding requests.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3337
3338static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3339{
3340 struct srej_list *new;
3341 u16 control;
3342
3343 while (tx_seq != chan->expected_tx_seq) {
3344 control = L2CAP_SUPER_SELECT_REJECT;
3345 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3346 l2cap_send_sframe(chan, control);
3347
3348 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3349 new->tx_seq = chan->expected_tx_seq;
3350 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3351 list_add_tail(&new->list, &chan->srej_l);
3352 }
3353 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3354}
3355
/* Process one received ERTM I-frame.
 *
 * Handles F-bit bookkeeping, acknowledged-frame cleanup and
 * receive-window validation, then either feeds the frame into an
 * ongoing SREJ recovery, starts a new recovery for an
 * out-of-sequence frame, or (the expected case) passes it to SDU
 * reassembly and acknowledges in batches.  Takes ownership of skb.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;	/* ack roughly every 1/6 of the window */
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* An F-bit answers our earlier poll: stop the monitor timer
	 * and fall back to the retransmission timer if needed. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	/* The piggy-backed req_seq acknowledges our sent frames. */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		/* SREJ recovery in progress: is this the oldest frame
		 * we asked for? */
		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				/* All gaps filled: leave recovery. */
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			/* A new gap beyond the ones already requested:
			 * ask for the newly missing frames too. */
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: start SREJ recovery. */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* In-order frame during recovery: park it; it is
		 * delivered once the earlier gaps are filled. */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	/* Batch acknowledgements instead of acking every frame. */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3490
/* Handle a Receiver Ready (RR) S-frame: a pure acknowledgement,
 * optionally carrying a poll (P) or final (F) bit.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Remote polls us: our answer must carry the F-bit. */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Answer to our poll: retransmit unless a REJ already
		 * triggered the retransmission. */
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3530
/* Handle a Reject (REJ) S-frame: the remote requests go-back-N
 * retransmission starting at req_seq.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	/* Everything before req_seq is implicitly acknowledged. */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit REJ: retransmit only if this REJ was not
		 * already acted upon. */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember we already retransmitted, in case the F-bit
		 * answer to our outstanding poll arrives later. */
		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit the single
 * frame req_seq, with P/F-bit bookkeeping analogous to REJ.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to req_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit when this F-bit answers an SREJ
		 * we already served. */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
3587
/* Handle a Receiver Not Ready (RNR) S-frame: the remote is busy, so
 * stop retransmitting and answer any poll appropriately.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery is active: keep requesting the missing
	 * frames, or just confirm our receive state if not polled. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3613
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after
 * the common F-bit processing.  Consumes the skb (S-frames carry no
 * payload to deliver).
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	/* An F-bit answers our poll: stop the monitor timer and fall
	 * back to the retransmission timer if frames are unacked. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3647
3648static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3649{
3650 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3651 u16 control;
3652 u8 req_seq;
3653 int len, next_tx_seq_offset, req_seq_offset;
3654
3655 control = get_unaligned_le16(skb->data);
3656 skb_pull(skb, 2);
3657 len = skb->len;
3658
3659 /*
3660 * We can just drop the corrupted I-frame here.
3661 * Receiver will miss it and start proper recovery
3662 * procedures and ask retransmission.
3663 */
3664 if (l2cap_check_fcs(chan, skb))
3665 goto drop;
3666
3667 if (__is_sar_start(control) && __is_iframe(control))
3668 len -= 2;
3669
3670 if (chan->fcs == L2CAP_FCS_CRC16)
3671 len -= 2;
3672
3673 if (len > chan->mps) {
3674 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3675 goto drop;
3676 }
3677
3678 req_seq = __get_reqseq(control);
3679 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3680 if (req_seq_offset < 0)
3681 req_seq_offset += 64;
3682
3683 next_tx_seq_offset =
3684 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3685 if (next_tx_seq_offset < 0)
3686 next_tx_seq_offset += 64;
3687
3688 /* check for invalid req-seq */
3689 if (req_seq_offset > next_tx_seq_offset) {
3690 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3691 goto drop;
3692 }
3693
3694 if (__is_iframe(control)) {
3695 if (len < 0) {
3696 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3697 goto drop;
3698 }
3699
3700 l2cap_data_channel_iframe(chan, control, skb);
3701 } else {
3702 if (len != 0) {
3703 BT_ERR("%d", len);
3704 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3705 goto drop;
3706 }
3707
3708 l2cap_data_channel_sframe(chan, control, skb);
3709 }
3710
3711 return 0;
3712
3713drop:
3714 kfree_skb(skb);
3715 return 0;
3716}
3717
/* Route a frame received on a dynamically allocated CID to its channel,
 * dispatching on the channel's operating mode.
 *
 * NOTE(review): there is no bh_lock_sock() in this function, yet "done:"
 * unlocks the socket -- l2cap_get_chan_by_scid() presumably returns with
 * the owning socket bh-locked; confirm against its definition.
 *
 * The skb is consumed on every path: delivered to the channel, queued on
 * the socket backlog, or freed.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning zero means the skb was consumed */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process now if the socket is free, otherwise defer to the
		 * backlog so the lock owner processes it on release.
		 */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		/* SAR start fragments carry a 2-byte SDU length field */
		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode never carries S-frames */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = (tx_seq + 1) % 64;

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3813
3814static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3815{
3816 struct sock *sk = NULL;
3817 struct l2cap_chan *chan;
3818
3819 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3820 if (!chan)
3821 goto drop;
3822
3823 sk = chan->sk;
3824
3825 bh_lock_sock(sk);
3826
3827 BT_DBG("sk %p, len %d", sk, skb->len);
3828
3829 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3830 goto drop;
3831
3832 if (chan->imtu < skb->len)
3833 goto drop;
3834
3835 if (!chan->ops->recv(chan->data, skb))
3836 goto done;
3837
3838drop:
3839 kfree_skb(skb);
3840
3841done:
3842 if (sk)
3843 bh_unlock_sock(sk);
3844 return 0;
3845}
3846
3847static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3848{
3849 struct sock *sk = NULL;
3850 struct l2cap_chan *chan;
3851
3852 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3853 if (!chan)
3854 goto drop;
3855
3856 sk = chan->sk;
3857
3858 bh_lock_sock(sk);
3859
3860 BT_DBG("sk %p, len %d", sk, skb->len);
3861
3862 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3863 goto drop;
3864
3865 if (chan->imtu < skb->len)
3866 goto drop;
3867
3868 if (!chan->ops->recv(chan->data, skb))
3869 goto done;
3870
3871drop:
3872 kfree_skb(skb);
3873
3874done:
3875 if (sk)
3876 bh_unlock_sock(sk);
3877 return 0;
3878}
3879
3880static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3881{
3882 struct l2cap_hdr *lh = (void *) skb->data;
3883 u16 cid, len;
3884 __le16 psm;
3885
3886 skb_pull(skb, L2CAP_HDR_SIZE);
3887 cid = __le16_to_cpu(lh->cid);
3888 len = __le16_to_cpu(lh->len);
3889
3890 if (len != skb->len) {
3891 kfree_skb(skb);
3892 return;
3893 }
3894
3895 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3896
3897 switch (cid) {
3898 case L2CAP_CID_LE_SIGNALING:
3899 case L2CAP_CID_SIGNALING:
3900 l2cap_sig_channel(conn, skb);
3901 break;
3902
3903 case L2CAP_CID_CONN_LESS:
3904 psm = get_unaligned_le16(skb->data);
3905 skb_pull(skb, 2);
3906 l2cap_conless_channel(conn, psm, skb);
3907 break;
3908
3909 case L2CAP_CID_LE_DATA:
3910 l2cap_att_channel(conn, cid, skb);
3911 break;
3912
3913 case L2CAP_CID_SMP:
3914 if (smp_sig_channel(conn, skb))
3915 l2cap_conn_del(conn->hcon, EACCES);
3916 break;
3917
3918 default:
3919 l2cap_data_channel(conn, cid, skb);
3920 break;
3921 }
3922}
3923
3924/* ---- L2CAP interface with lower layer (HCI) ---- */
3925
3926static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3927{
3928 int exact = 0, lm1 = 0, lm2 = 0;
3929 struct l2cap_chan *c;
3930
3931 if (type != ACL_LINK)
3932 return -EINVAL;
3933
3934 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3935
3936 /* Find listening sockets and check their link_mode */
3937 read_lock(&chan_list_lock);
3938 list_for_each_entry(c, &chan_list, global_l) {
3939 struct sock *sk = c->sk;
3940
3941 if (c->state != BT_LISTEN)
3942 continue;
3943
3944 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3945 lm1 |= HCI_LM_ACCEPT;
3946 if (c->role_switch)
3947 lm1 |= HCI_LM_MASTER;
3948 exact++;
3949 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3950 lm2 |= HCI_LM_ACCEPT;
3951 if (c->role_switch)
3952 lm2 |= HCI_LM_MASTER;
3953 }
3954 }
3955 read_unlock(&chan_list_lock);
3956
3957 return exact ? lm1 : lm2;
3958}
3959
3960static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3961{
3962 struct l2cap_conn *conn;
3963
3964 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3965
3966 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3967 return -EINVAL;
3968
3969 if (!status) {
3970 conn = l2cap_conn_add(hcon, status);
3971 if (conn)
3972 l2cap_conn_ready(conn);
3973 } else
3974 l2cap_conn_del(hcon, bt_to_errno(status));
3975
3976 return 0;
3977}
3978
3979static int l2cap_disconn_ind(struct hci_conn *hcon)
3980{
3981 struct l2cap_conn *conn = hcon->l2cap_data;
3982
3983 BT_DBG("hcon %p", hcon);
3984
3985 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3986 return 0x13;
3987
3988 return conn->disc_reason;
3989}
3990
3991static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3992{
3993 BT_DBG("hcon %p reason %d", hcon, reason);
3994
3995 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3996 return -EINVAL;
3997
3998 l2cap_conn_del(hcon, bt_to_errno(reason));
3999
4000 return 0;
4001}
4002
4003static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4004{
4005 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4006 return;
4007
4008 if (encrypt == 0x00) {
4009 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4010 __clear_chan_timer(chan);
4011 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4012 } else if (chan->sec_level == BT_SECURITY_HIGH)
4013 l2cap_chan_close(chan, ECONNREFUSED);
4014 } else {
4015 if (chan->sec_level == BT_SECURITY_MEDIUM)
4016 __clear_chan_timer(chan);
4017 }
4018}
4019
/* HCI callback: an authentication/encryption change completed on a link.
 *
 * Walks every channel on the connection under chan_lock and advances each
 * channel's state machine according to the security result:
 *  - LE data channel: adopt the link's security level and mark ready once
 *    encrypted.
 *  - Channels already waiting on a pending connect (CONF_CONNECT_PEND):
 *    skipped.
 *  - Established channels (BT_CONNECTED/BT_CONFIG) on success: re-check
 *    encryption requirements.
 *  - BT_CONNECT (outgoing, waiting for security): send the pending L2CAP
 *    Connect Request, or arm the disconnect timer on failure.
 *  - BT_CONNECT2 (incoming, waiting for security): answer the peer's
 *    Connect Request with success / authorization-pending / security-block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		smp_distribute_keys(conn, 0);
		del_timer(&conn->security_timer);
	}

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			bh_unlock_sock(sk);
			continue;
		}

		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				/* Security done: issue the deferred
				 * Connect Request.
				 */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				set_bit(CONF_CONNECT_PEND, &chan->conf_state);

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				__clear_chan_timer(chan);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Accept deferred to userspace: wake
					 * the listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4119
/* HCI callback: a chunk of ACL data arrived for this link.
 *
 * L2CAP frames may span several ACL packets.  A start fragment (ACL_CONT
 * clear) carries the Basic L2CAP header used to size a reassembly buffer
 * (conn->rx_skb); continuation fragments are appended until conn->rx_len
 * reaches zero, at which point the frame is handed to l2cap_recv_frame().
 *
 * NOTE(review): in the early MTU-check branch, bh_unlock_sock() is called
 * without a visible bh_lock_sock() in this function -- this implies
 * l2cap_get_chan_by_scid() returns with the socket locked; confirm.
 *
 * The source skb is always consumed: complete frames are passed on
 * directly, otherwise the payload is copied into rx_skb and the fragment
 * freed at "drop:".  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Data can arrive before the connect confirmation set up conn */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment aborts any reassembly in progress */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
								skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		/* Reject early if the full frame would exceed the channel's
		 * incoming MTU.
		 */
		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
						skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Deliberate fall-through from the success paths above: the
	 * fragment's payload has been copied, so free the source skb.
	 */
	kfree_skb(skb);
	return 0;
}
4230
4231static int l2cap_debugfs_show(struct seq_file *f, void *p)
4232{
4233 struct l2cap_chan *c;
4234
4235 read_lock_bh(&chan_list_lock);
4236
4237 list_for_each_entry(c, &chan_list, global_l) {
4238 struct sock *sk = c->sk;
4239
4240 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4241 batostr(&bt_sk(sk)->src),
4242 batostr(&bt_sk(sk)->dst),
4243 c->state, __le16_to_cpu(c->psm),
4244 c->scid, c->dcid, c->imtu, c->omtu,
4245 c->sec_level, c->mode);
4246}
4247
4248 read_unlock_bh(&chan_list_lock);
4249
4250 return 0;
4251}
4252
/* debugfs open: bind the single-record seq_file iterator to our show fn. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4257
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
4266
/* Registration record hooking L2CAP's callbacks into the HCI core. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4277
/* Module init: register the L2CAP socket layer, hook into the HCI core,
 * and create the debugfs dump file (debugfs failure is non-fatal).
 * Returns 0 on success or a negative errno.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		/* NOTE(review): l2cap_cleanup_sockets() on the error path
		 * may also unregister the BTPROTO_L2CAP socket family --
		 * verify this explicit call is not redundant.
		 */
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;

error:
	l2cap_cleanup_sockets();
	return err;
}
4306
4307void l2cap_exit(void)
4308{
4309 debugfs_remove(l2cap_debugfs);
4310
4311 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4312 BT_ERR("L2CAP protocol unregistration failed");
4313
4314 l2cap_cleanup_sockets();
4315}
4316
/* Runtime-writable (0644) module parameter to disable ERTM support. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");