Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35#include <linux/filter.h>
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
39#include <net/bluetooth/l2cap.h>
40
41#include "smp.h"
42#include "a2mp.h"
43#include "amp.h"
44
/* Upper bound on the credit count used by LE credit-based flow control. */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* When set, ERTM is disabled (toggled outside this chunk — presumably a
 * module parameter; confirm against the module_param declarations).
 */
bool disable_ertm;

/* Local feature mask advertised in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of every L2CAP channel, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in the file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
63
64static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
65{
66 if (link_type == LE_LINK) {
67 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 return BDADDR_LE_PUBLIC;
69 else
70 return BDADDR_LE_RANDOM;
71 }
72
73 return BDADDR_BREDR;
74}
75
76static inline u8 bdaddr_src_type(struct hci_conn *hcon)
77{
78 return bdaddr_type(hcon->type, hcon->src_type);
79}
80
81static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
82{
83 return bdaddr_type(hcon->type, hcon->dst_type);
84}
85
86/* ---- L2CAP channels ---- */
87
88static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
89 u16 cid)
90{
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->dcid == cid)
95 return c;
96 }
97 return NULL;
98}
99
100static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
101 u16 cid)
102{
103 struct l2cap_chan *c;
104
105 list_for_each_entry(c, &conn->chan_l, list) {
106 if (c->scid == cid)
107 return c;
108 }
109 return NULL;
110}
111
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* The channel lock is taken before chan_lock is dropped, so the
	 * returned channel cannot change state under the caller.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): no reference is taken on the returned channel;
	 * later kernels call l2cap_chan_hold() here to avoid a potential
	 * use-after-free — confirm callers' lifetime assumptions.
	 */
	return c;
}
127
/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* Lock the channel before releasing chan_lock so it cannot be
	 * torn down between lookup and use.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): returns without taking a reference — see the
	 * matching note on l2cap_get_chan_by_scid().
	 */
	return c;
}
144
145static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 u8 ident)
147{
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &conn->chan_l, list) {
151 if (c->ident == ident)
152 return c;
153 }
154 return NULL;
155}
156
/* Find the channel whose outstanding signalling request used @ident.
 * Returns a locked channel, or NULL.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): no reference taken — see note on
	 * l2cap_get_chan_by_scid().
	 */
	return c;
}
170
171static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
172{
173 struct l2cap_chan *c;
174
175 list_for_each_entry(c, &chan_list, global_l) {
176 if (c->sport == psm && !bacmp(&c->src, src))
177 return c;
178 }
179 return NULL;
180}
181
/* Bind @chan to a PSM on source address @src.
 *
 * If @psm is non-zero it is used as-is (fails with -EADDRINUSE when the
 * PSM/address pair is already bound).  If @psm is zero a free PSM is
 * auto-allocated from the dynamic range; -EINVAL if the range is full.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* A specific PSM may only be bound once per source address. */
	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		/* BR/EDR steps by 2 through its dynamic range (keeping the
		 * low octet odd, as the PSM format requires — confirm
		 * against the L2CAP spec); LE uses a contiguous range.
		 */
		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Stays -EINVAL if every candidate PSM is taken. */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
225
226int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
227{
228 write_lock(&chan_list_lock);
229
230 /* Override the defaults (which are for conn-oriented) */
231 chan->omtu = L2CAP_DEFAULT_MTU;
232 chan->chan_type = L2CAP_CHAN_FIXED;
233
234 chan->scid = scid;
235
236 write_unlock(&chan_list_lock);
237
238 return 0;
239}
240
241static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
242{
243 u16 cid, dyn_end;
244
245 if (conn->hcon->type == LE_LINK)
246 dyn_end = L2CAP_CID_LE_DYN_END;
247 else
248 dyn_end = L2CAP_CID_DYN_END;
249
250 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
251 if (!__l2cap_get_chan_by_scid(conn, cid))
252 return cid;
253 }
254
255 return 0;
256}
257
/* Move @chan to @state and notify the upper layer (with no error). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
266
/* Move @chan to @state and report @err to the upper layer in the same
 * notification.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
273
/* Report @err to the upper layer without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
278
/* Arm the ERTM retransmission timer.
 *
 * Note the pending check is deliberately on the *monitor* timer: while
 * a monitor cycle is running the retransmission timer is not restarted
 * (presumably the monitor timer takes precedence in ERTM — confirm
 * against the ERTM state machine).  Nothing is armed when no
 * retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
287
/* Arm the ERTM monitor timer; the retransmission timer is always
 * cancelled first, so at most one of the two runs at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
296
297static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
298 u16 seq)
299{
300 struct sk_buff *skb;
301
302 skb_queue_walk(head, skb) {
303 if (bt_cb(skb)->l2cap.txseq == seq)
304 return skb;
305 }
306
307 return NULL;
308}
309
310/* ---- L2CAP sequence number lists ---- */
311
312/* For ERTM, ordered lists of sequence numbers must be tracked for
313 * SREJ requests that are received and for frames that are to be
314 * retransmitted. These seq_list functions implement a singly-linked
315 * list in an array, where membership in the list can also be checked
316 * in constant time. Items can also be added to the tail of the list
317 * and removed from the head in constant time, without further memory
318 * allocs or frees.
319 */
320
321static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
322{
323 size_t alloc_size, i;
324
325 /* Allocated size is a power of 2 to map sequence numbers
326 * (which may be up to 14 bits) in to a smaller array that is
327 * sized for the negotiated ERTM transmit windows.
328 */
329 alloc_size = roundup_pow_of_two(size);
330
331 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
332 if (!seq_list->list)
333 return -ENOMEM;
334
335 seq_list->mask = alloc_size - 1;
336 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
337 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
338 for (i = 0; i < alloc_size; i++)
339 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
340
341 return 0;
342}
343
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
348
349static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
350 u16 seq)
351{
352 /* Constant-time check for list membership */
353 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
354}
355
/* Remove and return the sequence number at the head of the list.
 * Assumes the list is non-empty — callers are expected to check.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and mark the old slot
	 * as no longer a member.
	 */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popping the tail entry leaves the list empty. */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
371
372static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373{
374 u16 i;
375
376 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
377 return;
378
379 for (i = 0; i <= seq_list->mask; i++)
380 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
381
382 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
384}
385
/* Append @seq to the tail of the list (no-op if already a member). */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Membership is a set property: duplicates are ignored. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Link after the current tail, or become the head of an empty
	 * list.
	 */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
403
/* Delayed-work handler for the channel timer: close the channel with an
 * error code derived from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock —
	 * the same order used throughout this file.
	 */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close() is invoked without the channel lock held. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drops a reference — presumably the one taken when the timer
	 * was armed (see l2cap_set_timer); confirm in l2cap.h.
	 */
	l2cap_chan_put(chan);
}
433
/* Allocate a new channel, add it to the global channel list and return
 * it in BT_OPEN state with one reference held.  Returns NULL on OOM.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC looks stricter than necessary if all
	 * callers run in process context; later kernels use GFP_KERNEL
	 * here — confirm calling contexts before changing.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Make the channel globally visible. */
	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; dropped via l2cap_chan_put(). */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
465
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped by l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
478
/* Take a reference on the channel; paired with l2cap_chan_put(). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
485
/* Drop a reference; the last put frees the channel via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
493
494void l2cap_chan_set_defaults(struct l2cap_chan *chan)
495{
496 chan->fcs = L2CAP_FCS_CRC16;
497 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
498 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
499 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
500 chan->remote_max_tx = chan->max_tx;
501 chan->remote_tx_win = chan->tx_win;
502 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
503 chan->sec_level = BT_SECURITY_LOW;
504 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
505 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
506 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
507 chan->conf_state = 0;
508
509 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
510}
511EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
512
/* Initialise the LE credit-based flow control state of @chan. */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	/* Reset SDU reassembly state. */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	/* NOTE(review): assumes chan->mps is non-zero, i.e. both imtu
	 * and conn->mtu - L2CAP_HDR_SIZE are positive — confirm callers.
	 */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
526
/* Attach @chan to @conn, assigning CIDs and default MTU according to
 * the channel type.  Caller must hold conn->chan_lock (see
 * l2cap_chan_add() below).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) flow spec. */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The connection's channel list holds a reference; dropped in
	 * l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
578
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
585
/* Detach @chan from its connection and tear down mode-specific state.
 * @err is propagated to the upper layer via ops->teardown().
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the list's reference (taken in __l2cap_chan_add). */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	/* Tear down any high-speed (AMP) logical link. */
	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* If configuration never completed there is no mode-specific
	 * state (queues, timers, seq lists) to clean up.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
656
657static void l2cap_conn_update_id_addr(struct work_struct *work)
658{
659 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
660 id_addr_update_work);
661 struct hci_conn *hcon = conn->hcon;
662 struct l2cap_chan *chan;
663
664 mutex_lock(&conn->chan_lock);
665
666 list_for_each_entry(chan, &conn->chan_l, list) {
667 l2cap_chan_lock(chan);
668 bacpy(&chan->dst, &hcon->dst);
669 chan->dst_type = bdaddr_dst_type(hcon);
670 l2cap_chan_unlock(chan);
671 }
672
673 mutex_unlock(&conn->chan_lock);
674}
675
676static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
677{
678 struct l2cap_conn *conn = chan->conn;
679 struct l2cap_le_conn_rsp rsp;
680 u16 result;
681
682 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 result = L2CAP_CR_LE_AUTHORIZATION;
684 else
685 result = L2CAP_CR_LE_BAD_PSM;
686
687 l2cap_state_change(chan, BT_DISCONN);
688
689 rsp.dcid = cpu_to_le16(chan->scid);
690 rsp.mtu = cpu_to_le16(chan->imtu);
691 rsp.mps = cpu_to_le16(chan->mps);
692 rsp.credits = cpu_to_le16(chan->rx_credits);
693 rsp.result = cpu_to_le16(result);
694
695 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
696 &rsp);
697}
698
699static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
700{
701 struct l2cap_conn *conn = chan->conn;
702 struct l2cap_conn_rsp rsp;
703 u16 result;
704
705 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
706 result = L2CAP_CR_SEC_BLOCK;
707 else
708 result = L2CAP_CR_BAD_PSM;
709
710 l2cap_state_change(chan, BT_DISCONN);
711
712 rsp.scid = cpu_to_le16(chan->dcid);
713 rsp.dcid = cpu_to_le16(chan->scid);
714 rsp.result = cpu_to_le16(result);
715 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
716
717 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
718}
719
/* Close @chan, choosing the appropriate shutdown path for its current
 * state.  @reason is the error propagated to the upper layer.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Connection-oriented channels need a disconnect
		 * handshake; the channel timer guards against a peer
		 * that never responds.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* An incoming connection request is pending; reject it
		 * before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
762
/* Map the channel type and security level to an HCI authentication
 * requirement.  Note: may upgrade chan->sec_level from LOW to SDP as a
 * side effect for the SDP and 3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		/* 3DSP: LOW is bumped to SDP-level security. */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP gets bonding-free treatment; all other PSMs fall
		 * through to the general rules below.
		 */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
813
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE security is handled entirely by SMP. */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	/* Must be computed before reading chan->sec_level below:
	 * l2cap_get_auth_type() may upgrade the security level.
	 */
	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
828
829static u8 l2cap_get_ident(struct l2cap_conn *conn)
830{
831 u8 id;
832
833 /* Get next available identificator.
834 * 1 - 128 are used by kernel.
835 * 129 - 199 are reserved.
836 * 200 - 254 are used by utilities like l2ping, etc.
837 */
838
839 mutex_lock(&conn->ident_lock);
840
841 if (++conn->tx_ident > 128)
842 conn->tx_ident = 1;
843
844 id = conn->tx_ident;
845
846 mutex_unlock(&conn->ident_lock);
847
848 return id;
849}
850
851static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
852 void *data)
853{
854 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
855 u8 flags;
856
857 BT_DBG("code 0x%2.2x", code);
858
859 if (!skb)
860 return;
861
862 /* Use NO_FLUSH if supported or we have an LE link (which does
863 * not support auto-flushing packets) */
864 if (lmp_no_flush_capable(conn->hcon->hdev) ||
865 conn->hcon->type == LE_LINK)
866 flags = ACL_START_NO_FLUSH;
867 else
868 flags = ACL_START;
869
870 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
871 skb->priority = HCI_PRIO_MAX;
872
873 hci_send_acl(conn->hchan, skb, flags);
874}
875
876static bool __chan_is_moving(struct l2cap_chan *chan)
877{
878 return chan->move_state != L2CAP_MOVE_STABLE &&
879 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
880}
881
/* Transmit @skb on @chan, picking the AMP link when one is active and
 * choosing the ACL flush policy otherwise.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* While a high-speed (AMP) connection is up and the channel is
	 * not mid-move, traffic goes out on the AMP logical link.
	 */
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* No logical link to carry the frame. */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
913
914static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
915{
916 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
917 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
918
919 if (enh & L2CAP_CTRL_FRAME_TYPE) {
920 /* S-Frame */
921 control->sframe = 1;
922 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
923 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
924
925 control->sar = 0;
926 control->txseq = 0;
927 } else {
928 /* I-Frame */
929 control->sframe = 0;
930 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
931 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
932
933 control->poll = 0;
934 control->super = 0;
935 }
936}
937
938static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
939{
940 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
941 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
942
943 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
944 /* S-Frame */
945 control->sframe = 1;
946 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
947 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
948
949 control->sar = 0;
950 control->txseq = 0;
951 } else {
952 /* I-Frame */
953 control->sframe = 0;
954 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
955 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
956
957 control->poll = 0;
958 control->super = 0;
959 }
960}
961
962static inline void __unpack_control(struct l2cap_chan *chan,
963 struct sk_buff *skb)
964{
965 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
966 __unpack_extended_control(get_unaligned_le32(skb->data),
967 &bt_cb(skb)->l2cap);
968 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
969 } else {
970 __unpack_enhanced_control(get_unaligned_le16(skb->data),
971 &bt_cb(skb)->l2cap);
972 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
973 }
974}
975
976static u32 __pack_extended_control(struct l2cap_ctrl *control)
977{
978 u32 packed;
979
980 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
981 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
982
983 if (control->sframe) {
984 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
985 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
986 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
987 } else {
988 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
989 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
990 }
991
992 return packed;
993}
994
995static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
996{
997 u16 packed;
998
999 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1000 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1001
1002 if (control->sframe) {
1003 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1004 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1005 packed |= L2CAP_CTRL_FRAME_TYPE;
1006 } else {
1007 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1008 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1009 }
1010
1011 return packed;
1012}
1013
1014static inline void __pack_control(struct l2cap_chan *chan,
1015 struct l2cap_ctrl *control,
1016 struct sk_buff *skb)
1017{
1018 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1019 put_unaligned_le32(__pack_extended_control(control),
1020 skb->data + L2CAP_HDR_SIZE);
1021 } else {
1022 put_unaligned_le16(__pack_enhanced_control(control),
1023 skb->data + L2CAP_HDR_SIZE);
1024 }
1025}
1026
1027static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1028{
1029 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1030 return L2CAP_EXT_HDR_SIZE;
1031 else
1032 return L2CAP_ENH_HDR_SIZE;
1033}
1034
/* Build a supervisory (S-frame) PDU carrying the packed @control
 * field.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	/* The FCS, when enabled, follows the control field. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length and destination CID. */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on the extended-control flag. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* CRC covers everything written so far (header + control). */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1067
/* Send the supervisory frame described by @control, updating the
 * channel's ERTM bookkeeping (F-bit, RNR tracking, ack state) on the
 * way out.  Non-S-frames are ignored; nothing is sent mid-move.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Suppress traffic while the channel is moving between links. */
	if (__chan_is_moving(chan))
		return;

	/* Piggy-back a pending F-bit, unless this frame is a poll. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the last supervisory frame signalled local busy. */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* SREJ does not acknowledge new frames, so it neither updates
	 * last_acked_seq nor cancels the ack timer.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1108
1109static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1110{
1111 struct l2cap_ctrl control;
1112
1113 BT_DBG("chan %p, poll %d", chan, poll);
1114
1115 memset(&control, 0, sizeof(control));
1116 control.sframe = 1;
1117 control.poll = poll;
1118
1119 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1120 control.super = L2CAP_SUPER_RNR;
1121 else
1122 control.super = L2CAP_SUPER_RR;
1123
1124 control.reqseq = chan->buffer_seq;
1125 l2cap_send_sframe(chan, &control);
1126}
1127
1128static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1129{
1130 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1131 return true;
1132
1133 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1134}
1135
1136static bool __amp_capable(struct l2cap_chan *chan)
1137{
1138 struct l2cap_conn *conn = chan->conn;
1139 struct hci_dev *hdev;
1140 bool amp_available = false;
1141
1142 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1143 return false;
1144
1145 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1146 return false;
1147
1148 read_lock(&hci_dev_list_lock);
1149 list_for_each_entry(hdev, &hci_dev_list, list) {
1150 if (hdev->amp_type != AMP_TYPE_BREDR &&
1151 test_bit(HCI_UP, &hdev->flags)) {
1152 amp_available = true;
1153 break;
1154 }
1155 }
1156 read_unlock(&hci_dev_list_lock);
1157
1158 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1159 return amp_available;
1160
1161 return false;
1162}
1163
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	/* TODO(review): no validation is actually performed — every
	 * Extended Flow Specification is accepted unconditionally.
	 */
	return true;
}
1169
1170void l2cap_send_conn_req(struct l2cap_chan *chan)
1171{
1172 struct l2cap_conn *conn = chan->conn;
1173 struct l2cap_conn_req req;
1174
1175 req.scid = cpu_to_le16(chan->scid);
1176 req.psm = chan->psm;
1177
1178 chan->ident = l2cap_get_ident(conn);
1179
1180 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1181
1182 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1183}
1184
1185static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1186{
1187 struct l2cap_create_chan_req req;
1188 req.scid = cpu_to_le16(chan->scid);
1189 req.psm = chan->psm;
1190 req.amp_id = amp_id;
1191
1192 chan->ident = l2cap_get_ident(chan->conn);
1193
1194 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1195 sizeof(req), &req);
1196}
1197
/* Prepare an ERTM channel for a move to another controller: stop all
 * ERTM timers, rewind retransmission bookkeeping, and park both state
 * machines so traffic can resume cleanly once the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	/* Only ERTM channels carry state that must be reset for a move. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already transmitted at least once are reset to a single
	 * try; the walk stops at the first never-sent frame since the
	 * queue is transmitted in order.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has finished. */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1232
/* Finish a channel move: return to the stable state and, for ERTM,
 * resynchronize with the peer depending on the role we played.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		/* Poll the peer and wait for its F-bit response. */
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Wait for the initiator's poll (P-bit). */
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1254
/* Transition a channel to BT_CONNECTED and notify its owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel with no transmit credits starts out
	 * suspended until the peer grants credits.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1275
1276static void l2cap_le_connect(struct l2cap_chan *chan)
1277{
1278 struct l2cap_conn *conn = chan->conn;
1279 struct l2cap_le_conn_req req;
1280
1281 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1282 return;
1283
1284 l2cap_le_flowctl_init(chan);
1285
1286 req.psm = chan->psm;
1287 req.scid = cpu_to_le16(chan->scid);
1288 req.mtu = cpu_to_le16(chan->imtu);
1289 req.mps = cpu_to_le16(chan->mps);
1290 req.credits = cpu_to_le16(chan->rx_credits);
1291
1292 chan->ident = l2cap_get_ident(conn);
1293
1294 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1295 sizeof(req), &req);
1296}
1297
1298static void l2cap_le_start(struct l2cap_chan *chan)
1299{
1300 struct l2cap_conn *conn = chan->conn;
1301
1302 if (!smp_conn_security(conn->hcon, chan->sec_level))
1303 return;
1304
1305 if (!chan->psm) {
1306 l2cap_chan_ready(chan);
1307 return;
1308 }
1309
1310 if (chan->state == BT_CONNECT)
1311 l2cap_le_connect(chan);
1312}
1313
1314static void l2cap_start_connection(struct l2cap_chan *chan)
1315{
1316 if (__amp_capable(chan)) {
1317 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1318 a2mp_discover_amp(chan);
1319 } else if (chan->conn->hcon->type == LE_LINK) {
1320 l2cap_le_start(chan);
1321 } else {
1322 l2cap_send_conn_req(chan);
1323 }
1324}
1325
1326static void l2cap_request_info(struct l2cap_conn *conn)
1327{
1328 struct l2cap_info_req req;
1329
1330 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1331 return;
1332
1333 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1334
1335 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1336 conn->info_ident = l2cap_get_ident(conn);
1337
1338 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1339
1340 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1341 sizeof(req), &req);
1342}
1343
1344static void l2cap_do_start(struct l2cap_chan *chan)
1345{
1346 struct l2cap_conn *conn = chan->conn;
1347
1348 if (conn->hcon->type == LE_LINK) {
1349 l2cap_le_start(chan);
1350 return;
1351 }
1352
1353 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1354 l2cap_request_info(conn);
1355 return;
1356 }
1357
1358 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1359 return;
1360
1361 if (l2cap_chan_check_security(chan, true) &&
1362 __l2cap_no_conn_pending(chan))
1363 l2cap_start_connection(chan);
1364}
1365
1366static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1367{
1368 u32 local_feat_mask = l2cap_feat_mask;
1369 if (!disable_ertm)
1370 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1371
1372 switch (mode) {
1373 case L2CAP_MODE_ERTM:
1374 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1375 case L2CAP_MODE_STREAMING:
1376 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1377 default:
1378 return 0x00;
1379 }
1380}
1381
/* Initiate channel disconnection: stop ERTM timers, send a Disconnect
 * Request (except for the A2MP fixed channel, which has no disconnect
 * exchange) and move the channel to BT_DISCONN recording @err.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* The A2MP channel is torn down without a Disconnect exchange. */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1408
1409/* ---- L2CAP connections ---- */
/* Walk every channel on the connection and push it through the next
 * step of connection establishment: outgoing connect requests for
 * BT_CONNECT channels, responses (and the first config request) for
 * incoming BT_CONNECT2 channels.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless channels are ready as soon as the link is. */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A "state 2" device cannot fall back to basic
			 * mode; close the channel when its mode is not
			 * supported by both sides.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect request awaiting our verdict. */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner decide; answer
					 * "authorization pending" for now.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel
			 * proceeds to the configuration exchange.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1486
1487static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1488{
1489 struct hci_conn *hcon = conn->hcon;
1490 struct hci_dev *hdev = hcon->hdev;
1491
1492 BT_DBG("%s conn %p", hdev->name, conn);
1493
1494 /* For outgoing pairing which doesn't necessarily have an
1495 * associated socket (e.g. mgmt_pair_device).
1496 */
1497 if (hcon->out)
1498 smp_conn_security(hcon, hcon->pending_sec_level);
1499
1500 /* For LE slave connections, make sure the connection interval
1501 * is in the range of the minium and maximum interval that has
1502 * been configured for this connection. If not, then trigger
1503 * the connection update procedure.
1504 */
1505 if (hcon->role == HCI_ROLE_SLAVE &&
1506 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1507 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1508 struct l2cap_conn_param_update_req req;
1509
1510 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1511 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1512 req.latency = cpu_to_le16(hcon->le_conn_latency);
1513 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1514
1515 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1516 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1517 }
1518}
1519
/* Called when the underlying HCI link becomes usable: start the feature
 * exchange (ACL), advance every existing channel, and kick processing
 * of any RX frames that arrived early.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP channel is managed separately. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels become ready only once
			 * the feature exchange has finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1560
1561/* Notify sockets that we cannot guaranty reliability anymore */
1562static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1563{
1564 struct l2cap_chan *chan;
1565
1566 BT_DBG("conn %p", conn);
1567
1568 mutex_lock(&conn->chan_lock);
1569
1570 list_for_each_entry(chan, &conn->chan_l, list) {
1571 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1572 l2cap_chan_set_err(chan, err);
1573 }
1574
1575 mutex_unlock(&conn->chan_lock);
1576}
1577
/* The remote never answered our Information Request: mark the feature
 * exchange as done (with default features) and proceed with setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1588
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object is unregistered either explicitly or when the
 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they intend
 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
 * any time if they don't.
 */
1601
/* Register an l2cap_user on the connection: calls user->probe() and, on
 * success, links the user into conn->users. Returns 0, -EINVAL if the
 * user is already registered, -ENODEV if the connection is being torn
 * down, or the error returned by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1639
1640void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1641{
1642 struct hci_dev *hdev = conn->hcon->hdev;
1643
1644 hci_dev_lock(hdev);
1645
1646 if (list_empty(&user->list))
1647 goto out_unlock;
1648
1649 list_del_init(&user->list);
1650 user->remove(conn, user);
1651
1652out_unlock:
1653 hci_dev_unlock(hdev);
1654}
1655EXPORT_SYMBOL(l2cap_unregister_user);
1656
1657static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1658{
1659 struct l2cap_user *user;
1660
1661 while (!list_empty(&conn->users)) {
1662 user = list_first_entry(&conn->users, struct l2cap_user, list);
1663 list_del_init(&user->list);
1664 user->remove(conn, user);
1665 }
1666}
1667
/* Tear down an entire L2CAP connection: drop pending RX, cancel its
 * work items, unregister users, kill every channel with @err as the
 * reason, then release the HCI channel and the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close can run after unlock. */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1723
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1731
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1738
/* Drop a reference on @conn; frees it when the last one goes away. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1744
1745/* ---- Socket interface ---- */
1746
1747/* Find socket with psm and source / destination bdaddr.
1748 * Returns closest match.
1749 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Channels are transport specific: a BR/EDR lookup must
		 * not return an LE channel and vice versa.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Return exact matches immediately, with a
				 * reference held for the caller.
				 */
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1798
1799static void l2cap_monitor_timeout(struct work_struct *work)
1800{
1801 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1802 monitor_timer.work);
1803
1804 BT_DBG("chan %p", chan);
1805
1806 l2cap_chan_lock(chan);
1807
1808 if (!chan->conn) {
1809 l2cap_chan_unlock(chan);
1810 l2cap_chan_put(chan);
1811 return;
1812 }
1813
1814 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1815
1816 l2cap_chan_unlock(chan);
1817 l2cap_chan_put(chan);
1818}
1819
1820static void l2cap_retrans_timeout(struct work_struct *work)
1821{
1822 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1823 retrans_timer.work);
1824
1825 BT_DBG("chan %p", chan);
1826
1827 l2cap_chan_lock(chan);
1828
1829 if (!chan->conn) {
1830 l2cap_chan_unlock(chan);
1831 l2cap_chan_put(chan);
1832 return;
1833 }
1834
1835 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1836 l2cap_chan_unlock(chan);
1837 l2cap_chan_put(chan);
1838}
1839
/* Transmit SDU fragments in streaming mode: number, pack and send every
 * queued frame immediately, with no retransmission bookkeeping.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* No transmissions while the channel is moving controllers. */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the frame check sequence when negotiated. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1878
/* Transmit new I-frames in ERTM mode, up to the remote's transmit
 * window. Returns the number of frames sent, 0 when sending is not
 * currently possible, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* No transmissions while the channel is moving controllers. */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an acknowledgment on the outgoing frame. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original stays queued for
		 * possible retransmission until acknowledged.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1948
/* Retransmit every frame whose sequence number is queued on the
 * channel's retrans_list, disconnecting the channel when a frame has
 * exceeded the negotiated maximum number of transmissions.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means an unlimited number of retries. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgment and F-bit for this copy. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2029
/* Queue a single frame (identified by control->reqseq) for
 * retransmission and resend immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2038
/* Queue for retransmission every unacknowledged frame starting at
 * control->reqseq (up to, but not including, tx_send_head) and resend.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Locate the first frame to retransmit. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect everything up to the first never-sent frame. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2072
/* Acknowledge received I-frames: either piggy-back the ack on queued
 * transmissions, send an explicit RR/RNR S-frame once the backlog fills
 * 3/4 of the ack window, or (re)arm the ack timer to try again later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local side busy: tell the peer to stop sending. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise delay the ack, hoping to piggy-back it. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2122
/* Copy @len bytes of message payload into @skb, allocating continuation
 * fragments (chained via frag_list) whenever the data exceeds the HCI
 * MTU. Returns the number of bytes consumed or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	/* First chunk goes into the head skb (headers already present). */
	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len -= count;

		/* Keep the head skb's length accounting in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2166
/* Build a connectionless PDU: L2CAP header plus 2-byte PSM, followed by
 * the user payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Head skb carries at most one HCI MTU; the rest is fragmented. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2198
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Head skb carries at most one HCI MTU; the rest is fragmented. */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2228
/* Build one ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field placeholder (filled in at transmit time), an optional SDU
 * length (first PDU of a segmented SDU), then the payload. FCS space is
 * reserved via the header-size calculation but appended later. Returns
 * the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2282
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs queued on
 * @seg_queue, setting the SAR bits (unsegmented/start/continue/end).
 * Returns 0 or a negative error; the queue is purged on failure.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits in one PDU; no SDU length field. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START PDU carries the SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2349
/* Build one LE flow-control PDU; the first PDU of a segmented SDU
 * additionally carries the 2-byte SDU length. Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2392
2393static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2394 struct sk_buff_head *seg_queue,
2395 struct msghdr *msg, size_t len)
2396{
2397 struct sk_buff *skb;
2398 size_t pdu_len;
2399 u16 sdu_len;
2400
2401 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2402
2403 sdu_len = len;
2404 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2405
2406 while (len > 0) {
2407 if (len <= pdu_len)
2408 pdu_len = len;
2409
2410 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2411 if (IS_ERR(skb)) {
2412 __skb_queue_purge(seg_queue);
2413 return PTR_ERR(skb);
2414 }
2415
2416 __skb_queue_tail(seg_queue, skb);
2417
2418 len -= pdu_len;
2419
2420 if (sdu_len) {
2421 sdu_len = 0;
2422 pdu_len += L2CAP_SDULEN_SIZE;
2423 }
2424 }
2425
2426 return 0;
2427}
2428
2429static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2430{
2431 int sent = 0;
2432
2433 BT_DBG("chan %p", chan);
2434
2435 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2436 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2437 chan->tx_credits--;
2438 sent++;
2439 }
2440
2441 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2442 skb_queue_len(&chan->tx_q));
2443}
2444
/* Send @len bytes from @msg on @chan, dispatching on channel type and
 * mode (connectionless, LE flow control, basic, ERTM/streaming).
 * Returns the number of bytes accepted or a negative errno.
 *
 * NOTE(review): the mode-specific PDU builders may release and
 * reacquire the channel lock, hence the repeated BT_CONNECTED rechecks
 * after each builder call — presumably the lock discipline is
 * documented at the callers; confirm before relying on it.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel closed while segmenting: purge and override any
		 * segmentation result with -ENOTCONN.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop feeding us */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2571
/* Send an SREJ S-frame for every sequence number between
 * chan->expected_tx_seq and @txseq that is not already buffered in the
 * SREJ queue, recording each in srej_list, then advance
 * expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already received out of order sit in srej_q and
		 * need no retransmission request.
		 */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2594
/* Re-send an SREJ S-frame for the most recently requested (tail)
 * sequence number in srej_list; no-op when the list is empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2610
/* Re-send SREJ S-frames for every outstanding sequence number in
 * srej_list except @txseq.  Entries are popped and re-appended, so the
 * initial head is captured to guarantee a single pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* @txseq just arrived, and CLEAR means the list is empty */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2636
/* Process an acknowledgement up to (but not including) @reqseq: drop
 * every acked frame from the tx queue, advance expected_ack_seq, and
 * stop the retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2668
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * last delivered frame, drop all pending SREJ bookkeeping and buffered
 * out-of-order frames, and return the receiver to the RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2678
/* ERTM transmit state machine, XMIT state: handle @event with optional
 * received @control and/or @skbs to enqueue.  Poll-type events
 * (EXPLICIT_POLL, RETRANS_TO) move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember the first new frame so l2cap_ertm_send() knows
		 * where unsent data starts in tx_q.
		 */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We told the peer we were busy (RNR); poll it with
			 * an RR to resynchronize, then wait for the F-bit.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2750
/* ERTM transmit state machine, WAIT_F state: a poll is outstanding, so
 * new data is queued but not sent until a frame with the F-bit set is
 * received, which returns the channel to XMIT.  Monitor timeouts
 * re-poll up to max_tx times before disconnecting.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			/* RNR was sent while busy; poll the peer with RR
			 * and restart the wait for the F-bit.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means retry forever */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2828
2829static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2830 struct sk_buff_head *skbs, u8 event)
2831{
2832 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2833 chan, control, skbs, event, chan->tx_state);
2834
2835 switch (chan->tx_state) {
2836 case L2CAP_TX_STATE_XMIT:
2837 l2cap_tx_state_xmit(chan, control, skbs, event);
2838 break;
2839 case L2CAP_TX_STATE_WAIT_F:
2840 l2cap_tx_state_wait_f(chan, control, skbs, event);
2841 break;
2842 default:
2843 /* Ignore event */
2844 break;
2845 }
2846}
2847
/* Forward a received frame's reqseq/F-bit acknowledgement information
 * to the transmit state machine.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2854
/* Forward only a received frame's F-bit to the transmit state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2861
2862/* Copy frame to all raw sockets on that connection */
2863static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2864{
2865 struct sk_buff *nskb;
2866 struct l2cap_chan *chan;
2867
2868 BT_DBG("conn %p", conn);
2869
2870 mutex_lock(&conn->chan_lock);
2871
2872 list_for_each_entry(chan, &conn->chan_l, list) {
2873 if (chan->chan_type != L2CAP_CHAN_RAW)
2874 continue;
2875
2876 /* Don't send frame to the channel it came from */
2877 if (bt_cb(skb)->l2cap.chan == chan)
2878 continue;
2879
2880 nskb = skb_clone(skb, GFP_KERNEL);
2881 if (!nskb)
2882 continue;
2883 if (chan->ops->recv(chan, nskb))
2884 kfree_skb(nskb);
2885 }
2886
2887 mutex_unlock(&conn->chan_lock);
2888}
2889
2890/* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header +
 * @dlen bytes of @data, addressed to the (LE or BR/EDR) signalling
 * channel.  Payload beyond the connection MTU is carried in plain
 * continuation fragments chained on frag_list.  Returns NULL when the
 * MTU cannot even hold the headers or on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment holds whatever data fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including already-built fragments */
	kfree_skb(skb);
	return NULL;
}
2956
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * Returns the total option length consumed; the option type and length
 * come back through @type/@olen, and the value through @val (decoded
 * for 1/2/4-byte options, otherwise a pointer to the raw bytes).
 *
 * NOTE(review): opt->len comes from the remote peer and is not bounds
 * checked here — callers must validate the returned length against
 * their remaining buffer and check *olen before trusting *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the bytes */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2990
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it.  @val holds the immediate value for 1/2/4-byte
 * options, or a pointer to the data for longer ones.  Silently does
 * nothing when fewer than @size bytes remain — callers size their
 * response buffers accordingly.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Never write past the caller's buffer */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3023
/* Append an Extended Flow Specification (EFS) option describing the
 * channel's local service parameters.  ERTM channels advertise their
 * negotiated service type; streaming channels advertise best-effort
 * with zero access latency and flush timeout.  Other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3054
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not yet been acknowledged, send an RR/RNR carrying the current
 * buffer_seq.  Drops the channel reference taken when the timer was
 * armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3074
/* Initialise per-channel transmit/receive state shared by ERTM and
 * streaming modes.  For ERTM proper, also set the initial rx/tx states,
 * arm the retransmission/monitor/ack work items, and allocate the SREJ
 * and retransmission sequence lists.  Returns 0 on success or a
 * negative errno from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3119
3120static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3121{
3122 switch (mode) {
3123 case L2CAP_MODE_STREAMING:
3124 case L2CAP_MODE_ERTM:
3125 if (l2cap_mode_supported(mode, remote_feat_mask))
3126 return mode;
3127 /* fall through */
3128 default:
3129 return L2CAP_MODE_BASIC;
3130 }
3131}
3132
3133static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3134{
3135 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3136 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3137}
3138
3139static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3140{
3141 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3142 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3143}
3144
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout (clamped to the 16-bit field); otherwise the spec defaults
 * are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		/* Non-AMP link: use the specification default timeouts */
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3182
/* Configure the transmit window: use the extended control field when
 * the requested window exceeds the default and the connection supports
 * extended window sizes, otherwise clamp to the default window.  The
 * ack window starts out equal to the tx window.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}
3197
/* Build a Configure Request for @chan into @data (at most @data_size
 * bytes).  On the first request the channel mode may be downgraded via
 * l2cap_select_mode() based on the remote feature mask; the MTU, RFC,
 * EFS, EWS and FCS options are then added as the chosen mode requires.
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any configure exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when deviating from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU must fit in the HCI MTU with worst-case overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3319
3320static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3321{
3322 struct l2cap_conf_rsp *rsp = data;
3323 void *ptr = rsp->data;
3324 void *endptr = data + data_size;
3325 void *req = chan->conf_req;
3326 int len = chan->conf_len;
3327 int type, hint, olen;
3328 unsigned long val;
3329 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3330 struct l2cap_conf_efs efs;
3331 u8 remote_efs = 0;
3332 u16 mtu = L2CAP_DEFAULT_MTU;
3333 u16 result = L2CAP_CONF_SUCCESS;
3334 u16 size;
3335
3336 BT_DBG("chan %p", chan);
3337
3338 while (len >= L2CAP_CONF_OPT_SIZE) {
3339 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3340
3341 hint = type & L2CAP_CONF_HINT;
3342 type &= L2CAP_CONF_MASK;
3343
3344 switch (type) {
3345 case L2CAP_CONF_MTU:
3346 mtu = val;
3347 break;
3348
3349 case L2CAP_CONF_FLUSH_TO:
3350 chan->flush_to = val;
3351 break;
3352
3353 case L2CAP_CONF_QOS:
3354 break;
3355
3356 case L2CAP_CONF_RFC:
3357 if (olen == sizeof(rfc))
3358 memcpy(&rfc, (void *) val, olen);
3359 break;
3360
3361 case L2CAP_CONF_FCS:
3362 if (val == L2CAP_FCS_NONE)
3363 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3364 break;
3365
3366 case L2CAP_CONF_EFS:
3367 if (olen == sizeof(efs)) {
3368 remote_efs = 1;
3369 memcpy(&efs, (void *) val, olen);
3370 }
3371 break;
3372
3373 case L2CAP_CONF_EWS:
3374 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3375 return -ECONNREFUSED;
3376
3377 set_bit(FLAG_EXT_CTRL, &chan->flags);
3378 set_bit(CONF_EWS_RECV, &chan->conf_state);
3379 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3380 chan->remote_tx_win = val;
3381 break;
3382
3383 default:
3384 if (hint)
3385 break;
3386
3387 result = L2CAP_CONF_UNKNOWN;
3388 *((u8 *) ptr++) = type;
3389 break;
3390 }
3391 }
3392
3393 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3394 goto done;
3395
3396 switch (chan->mode) {
3397 case L2CAP_MODE_STREAMING:
3398 case L2CAP_MODE_ERTM:
3399 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3400 chan->mode = l2cap_select_mode(rfc.mode,
3401 chan->conn->feat_mask);
3402 break;
3403 }
3404
3405 if (remote_efs) {
3406 if (__l2cap_efs_supported(chan->conn))
3407 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3408 else
3409 return -ECONNREFUSED;
3410 }
3411
3412 if (chan->mode != rfc.mode)
3413 return -ECONNREFUSED;
3414
3415 break;
3416 }
3417
3418done:
3419 if (chan->mode != rfc.mode) {
3420 result = L2CAP_CONF_UNACCEPT;
3421 rfc.mode = chan->mode;
3422
3423 if (chan->num_conf_rsp == 1)
3424 return -ECONNREFUSED;
3425
3426 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3427 (unsigned long) &rfc, endptr - ptr);
3428 }
3429
3430 if (result == L2CAP_CONF_SUCCESS) {
3431 /* Configure output options and let the other side know
3432 * which ones we don't like. */
3433
3434 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3435 result = L2CAP_CONF_UNACCEPT;
3436 else {
3437 chan->omtu = mtu;
3438 set_bit(CONF_MTU_DONE, &chan->conf_state);
3439 }
3440 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3441
3442 if (remote_efs) {
3443 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3444 efs.stype != L2CAP_SERV_NOTRAFIC &&
3445 efs.stype != chan->local_stype) {
3446
3447 result = L2CAP_CONF_UNACCEPT;
3448
3449 if (chan->num_conf_req >= 1)
3450 return -ECONNREFUSED;
3451
3452 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3453 sizeof(efs),
3454 (unsigned long) &efs, endptr - ptr);
3455 } else {
3456 /* Send PENDING Conf Rsp */
3457 result = L2CAP_CONF_PENDING;
3458 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3459 }
3460 }
3461
3462 switch (rfc.mode) {
3463 case L2CAP_MODE_BASIC:
3464 chan->fcs = L2CAP_FCS_NONE;
3465 set_bit(CONF_MODE_DONE, &chan->conf_state);
3466 break;
3467
3468 case L2CAP_MODE_ERTM:
3469 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3470 chan->remote_tx_win = rfc.txwin_size;
3471 else
3472 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3473
3474 chan->remote_max_tx = rfc.max_transmit;
3475
3476 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3477 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3478 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3479 rfc.max_pdu_size = cpu_to_le16(size);
3480 chan->remote_mps = size;
3481
3482 __l2cap_set_ertm_timeouts(chan, &rfc);
3483
3484 set_bit(CONF_MODE_DONE, &chan->conf_state);
3485
3486 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3487 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3488
3489 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3490 chan->remote_id = efs.id;
3491 chan->remote_stype = efs.stype;
3492 chan->remote_msdu = le16_to_cpu(efs.msdu);
3493 chan->remote_flush_to =
3494 le32_to_cpu(efs.flush_to);
3495 chan->remote_acc_lat =
3496 le32_to_cpu(efs.acc_lat);
3497 chan->remote_sdu_itime =
3498 le32_to_cpu(efs.sdu_itime);
3499 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3500 sizeof(efs),
3501 (unsigned long) &efs, endptr - ptr);
3502 }
3503 break;
3504
3505 case L2CAP_MODE_STREAMING:
3506 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3507 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3508 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3509 rfc.max_pdu_size = cpu_to_le16(size);
3510 chan->remote_mps = size;
3511
3512 set_bit(CONF_MODE_DONE, &chan->conf_state);
3513
3514 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3515 (unsigned long) &rfc, endptr - ptr);
3516
3517 break;
3518
3519 default:
3520 result = L2CAP_CONF_UNACCEPT;
3521
3522 memset(&rfc, 0, sizeof(rfc));
3523 rfc.mode = chan->mode;
3524 }
3525
3526 if (result == L2CAP_CONF_SUCCESS)
3527 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3528 }
3529 rsp->scid = cpu_to_le16(chan->dcid);
3530 rsp->result = cpu_to_le16(result);
3531 rsp->flags = cpu_to_le16(0);
3532
3533 return ptr - data;
3534}
3535
3536static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3537 void *data, size_t size, u16 *result)
3538{
3539 struct l2cap_conf_req *req = data;
3540 void *ptr = req->data;
3541 void *endptr = data + size;
3542 int type, olen;
3543 unsigned long val;
3544 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3545 struct l2cap_conf_efs efs;
3546
3547 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3548
3549 while (len >= L2CAP_CONF_OPT_SIZE) {
3550 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3551
3552 switch (type) {
3553 case L2CAP_CONF_MTU:
3554 if (val < L2CAP_DEFAULT_MIN_MTU) {
3555 *result = L2CAP_CONF_UNACCEPT;
3556 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3557 } else
3558 chan->imtu = val;
3559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3560 break;
3561
3562 case L2CAP_CONF_FLUSH_TO:
3563 chan->flush_to = val;
3564 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3565 2, chan->flush_to, endptr - ptr);
3566 break;
3567
3568 case L2CAP_CONF_RFC:
3569 if (olen == sizeof(rfc))
3570 memcpy(&rfc, (void *)val, olen);
3571
3572 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3573 rfc.mode != chan->mode)
3574 return -ECONNREFUSED;
3575
3576 chan->fcs = 0;
3577
3578 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3579 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3580 break;
3581
3582 case L2CAP_CONF_EWS:
3583 chan->ack_win = min_t(u16, val, chan->ack_win);
3584 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3585 chan->tx_win, endptr - ptr);
3586 break;
3587
3588 case L2CAP_CONF_EFS:
3589 if (olen == sizeof(efs)) {
3590 memcpy(&efs, (void *)val, olen);
3591
3592 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3593 efs.stype != L2CAP_SERV_NOTRAFIC &&
3594 efs.stype != chan->local_stype)
3595 return -ECONNREFUSED;
3596
3597 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3598 (unsigned long) &efs, endptr - ptr);
3599 }
3600 break;
3601
3602 case L2CAP_CONF_FCS:
3603 if (*result == L2CAP_CONF_PENDING)
3604 if (val == L2CAP_FCS_NONE)
3605 set_bit(CONF_RECV_NO_FCS,
3606 &chan->conf_state);
3607 break;
3608 }
3609 }
3610
3611 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3612 return -ECONNREFUSED;
3613
3614 chan->mode = rfc.mode;
3615
3616 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3617 switch (rfc.mode) {
3618 case L2CAP_MODE_ERTM:
3619 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3620 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3621 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3622 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3623 chan->ack_win = min_t(u16, chan->ack_win,
3624 rfc.txwin_size);
3625
3626 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3627 chan->local_msdu = le16_to_cpu(efs.msdu);
3628 chan->local_sdu_itime =
3629 le32_to_cpu(efs.sdu_itime);
3630 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3631 chan->local_flush_to =
3632 le32_to_cpu(efs.flush_to);
3633 }
3634 break;
3635
3636 case L2CAP_MODE_STREAMING:
3637 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3638 }
3639 }
3640
3641 req->dcid = cpu_to_le16(chan->dcid);
3642 req->flags = cpu_to_le16(0);
3643
3644 return ptr - data;
3645}
3646
3647static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3648 u16 result, u16 flags)
3649{
3650 struct l2cap_conf_rsp *rsp = data;
3651 void *ptr = rsp->data;
3652
3653 BT_DBG("chan %p", chan);
3654
3655 rsp->scid = cpu_to_le16(chan->dcid);
3656 rsp->result = cpu_to_le16(result);
3657 rsp->flags = cpu_to_le16(flags);
3658
3659 return ptr - data;
3660}
3661
3662void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3663{
3664 struct l2cap_le_conn_rsp rsp;
3665 struct l2cap_conn *conn = chan->conn;
3666
3667 BT_DBG("chan %p", chan);
3668
3669 rsp.dcid = cpu_to_le16(chan->scid);
3670 rsp.mtu = cpu_to_le16(chan->imtu);
3671 rsp.mps = cpu_to_le16(chan->mps);
3672 rsp.credits = cpu_to_le16(chan->rx_credits);
3673 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3674
3675 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3676 &rsp);
3677}
3678
3679void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3680{
3681 struct l2cap_conn_rsp rsp;
3682 struct l2cap_conn *conn = chan->conn;
3683 u8 buf[128];
3684 u8 rsp_code;
3685
3686 rsp.scid = cpu_to_le16(chan->dcid);
3687 rsp.dcid = cpu_to_le16(chan->scid);
3688 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3689 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3690
3691 if (chan->hs_hcon)
3692 rsp_code = L2CAP_CREATE_CHAN_RSP;
3693 else
3694 rsp_code = L2CAP_CONN_RSP;
3695
3696 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3697
3698 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3699
3700 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3701 return;
3702
3703 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3704 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3705 chan->num_conf_req++;
3706}
3707
3708static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3709{
3710 int type, olen;
3711 unsigned long val;
3712 /* Use sane default values in case a misbehaving remote device
3713 * did not send an RFC or extended window size option.
3714 */
3715 u16 txwin_ext = chan->ack_win;
3716 struct l2cap_conf_rfc rfc = {
3717 .mode = chan->mode,
3718 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3719 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3720 .max_pdu_size = cpu_to_le16(chan->imtu),
3721 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3722 };
3723
3724 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3725
3726 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3727 return;
3728
3729 while (len >= L2CAP_CONF_OPT_SIZE) {
3730 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3731
3732 switch (type) {
3733 case L2CAP_CONF_RFC:
3734 if (olen == sizeof(rfc))
3735 memcpy(&rfc, (void *)val, olen);
3736 break;
3737 case L2CAP_CONF_EWS:
3738 txwin_ext = val;
3739 break;
3740 }
3741 }
3742
3743 switch (rfc.mode) {
3744 case L2CAP_MODE_ERTM:
3745 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3746 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3747 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3748 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3749 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3750 else
3751 chan->ack_win = min_t(u16, chan->ack_win,
3752 rfc.txwin_size);
3753 break;
3754 case L2CAP_MODE_STREAMING:
3755 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3756 }
3757}
3758
3759static inline int l2cap_command_rej(struct l2cap_conn *conn,
3760 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3761 u8 *data)
3762{
3763 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3764
3765 if (cmd_len < sizeof(*rej))
3766 return -EPROTO;
3767
3768 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3769 return 0;
3770
3771 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3772 cmd->ident == conn->info_ident) {
3773 cancel_delayed_work(&conn->info_timer);
3774
3775 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3776 conn->info_ident = 0;
3777
3778 l2cap_conn_start(conn);
3779 }
3780
3781 return 0;
3782}
3783
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * selected via @rsp_code/@amp_id).  Finds a listening channel for the
 * requested PSM, validates link security and the remote's source CID,
 * creates the new channel and sends the response.
 *
 * Returns the newly created channel, or NULL when the request was
 * rejected before a channel could be created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Decide the response: success only when the feature-mask exchange
	 * is done, security has passed and setup is not deferred; all other
	 * paths report a pending result.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* If the feature mask exchange has not happened yet, start it now */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3926
3927static int l2cap_connect_req(struct l2cap_conn *conn,
3928 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3929{
3930 struct hci_dev *hdev = conn->hcon->hdev;
3931 struct hci_conn *hcon = conn->hcon;
3932
3933 if (cmd_len < sizeof(struct l2cap_conn_req))
3934 return -EPROTO;
3935
3936 hci_dev_lock(hdev);
3937 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3938 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3939 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3940 hci_dev_unlock(hdev);
3941
3942 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3943 return 0;
3944}
3945
/* Handle a Connection Response (or AMP Create Channel Response).
 * Looks up the channel by scid (or, while still pending, by the
 * command ident), and on success moves it to BT_CONFIG and sends the
 * first Configure Request.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A pending response may not echo our scid yet; fall back to
	 * matching on the signalling ident in that case.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a rejection; tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4018
4019static inline void set_default_fcs(struct l2cap_chan *chan)
4020{
4021 /* FCS is enabled only in ERTM or streaming mode, if one or both
4022 * sides request it.
4023 */
4024 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4025 chan->fcs = L2CAP_FCS_NONE;
4026 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4027 chan->fcs = L2CAP_FCS_CRC16;
4028}
4029
4030static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4031 u8 ident, u16 flags)
4032{
4033 struct l2cap_conn *conn = chan->conn;
4034
4035 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4036 flags);
4037
4038 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4039 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4040
4041 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4042 l2cap_build_conf_rsp(chan, data,
4043 L2CAP_CONF_SUCCESS, flags), data);
4044}
4045
4046static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4047 u16 scid, u16 dcid)
4048{
4049 struct l2cap_cmd_rej_cid rej;
4050
4051 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4052 rej.scid = __cpu_to_le16(scid);
4053 rej.dcid = __cpu_to_le16(dcid);
4054
4055 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4056}
4057
/* Handle an incoming Configure Request.  Options may arrive split over
 * several requests (continuation flag); they are accumulated in
 * chan->conf_req until complete, then parsed and answered.  When both
 * configuration directions are done the channel becomes ready.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4166
/* Handle an incoming Configure Response.  On success the negotiated
 * parameters are applied; on "unacceptable parameters" a corrected
 * Configure Request is re-sent (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * any other result tears the channel down.  When both configuration
 * directions complete the channel becomes ready.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Remote accepted our options; record the final values */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* For an AMP channel the response is deferred until
			 * the logical link comes up.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - retry limit reached, treat as failure */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4278
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our local scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference so the channel survives l2cap_chan_del() until
	 * ops->close() has run; drop it afterwards.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4325
/* Handle an incoming Disconnection Response: the peer has confirmed our
 * disconnect request, so finish tearing the channel down.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel survives l2cap_chan_del() until
	 * ops->close() has run; drop it afterwards.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4364
4365static inline int l2cap_information_req(struct l2cap_conn *conn,
4366 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4367 u8 *data)
4368{
4369 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4370 u16 type;
4371
4372 if (cmd_len != sizeof(*req))
4373 return -EPROTO;
4374
4375 type = __le16_to_cpu(req->type);
4376
4377 BT_DBG("type 0x%4.4x", type);
4378
4379 if (type == L2CAP_IT_FEAT_MASK) {
4380 u8 buf[8];
4381 u32 feat_mask = l2cap_feat_mask;
4382 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4383 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4384 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4385 if (!disable_ertm)
4386 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4387 | L2CAP_FEAT_FCS;
4388 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4389 feat_mask |= L2CAP_FEAT_EXT_FLOW
4390 | L2CAP_FEAT_EXT_WINDOW;
4391
4392 put_unaligned_le32(feat_mask, rsp->data);
4393 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4394 buf);
4395 } else if (type == L2CAP_IT_FIXED_CHAN) {
4396 u8 buf[12];
4397 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4398
4399 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4400 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4401 rsp->data[0] = conn->local_fixed_chan;
4402 memset(rsp->data + 1, 0, 7);
4403 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4404 buf);
4405 } else {
4406 struct l2cap_info_rsp rsp;
4407 rsp.type = cpu_to_le16(type);
4408 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4409 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4410 &rsp);
4411 }
4412
4413 return 0;
4414}
4415
/* Handle an incoming Information Response.  After receiving the feature
 * mask we may follow up with a fixed-channel query; once the exchange
 * finishes (or fails) pending connections are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Treat a failed query as "exchange done" and move on */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If fixed channels are supported, query which ones */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4478
/* Handle an incoming AMP Create Channel Request.  For controller id 0
 * this is just a BR/EDR connect; otherwise the AMP controller id is
 * validated and, on success, the new channel is bound to the
 * high-speed link.  A bad controller id is answered with
 * L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4555
4556static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4557{
4558 struct l2cap_move_chan_req req;
4559 u8 ident;
4560
4561 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4562
4563 ident = l2cap_get_ident(chan->conn);
4564 chan->ident = ident;
4565
4566 req.icid = cpu_to_le16(chan->scid);
4567 req.dest_amp_id = dest_amp_id;
4568
4569 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4570 &req);
4571
4572 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4573}
4574
4575static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4576{
4577 struct l2cap_move_chan_rsp rsp;
4578
4579 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4580
4581 rsp.icid = cpu_to_le16(chan->dcid);
4582 rsp.result = cpu_to_le16(result);
4583
4584 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4585 sizeof(rsp), &rsp);
4586}
4587
4588static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4589{
4590 struct l2cap_move_chan_cfm cfm;
4591
4592 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4593
4594 chan->ident = l2cap_get_ident(chan->conn);
4595
4596 cfm.icid = cpu_to_le16(chan->scid);
4597 cfm.result = cpu_to_le16(result);
4598
4599 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4600 sizeof(cfm), &cfm);
4601
4602 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4603}
4604
4605static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4606{
4607 struct l2cap_move_chan_cfm cfm;
4608
4609 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4610
4611 cfm.icid = cpu_to_le16(icid);
4612 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4613
4614 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4615 sizeof(cfm), &cfm);
4616}
4617
4618static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4619 u16 icid)
4620{
4621 struct l2cap_move_chan_cfm_rsp rsp;
4622
4623 BT_DBG("icid 0x%4.4x", icid);
4624
4625 rsp.icid = cpu_to_le16(icid);
4626 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4627}
4628
4629static void __release_logical_link(struct l2cap_chan *chan)
4630{
4631 chan->hs_hchan = NULL;
4632 chan->hs_hcon = NULL;
4633
4634 /* Placeholder - release the logical link */
4635}
4636
4637static void l2cap_logical_fail(struct l2cap_chan *chan)
4638{
4639 /* Logical link setup failed */
4640 if (chan->state != BT_CONNECTED) {
4641 /* Create channel failure, disconnect */
4642 l2cap_send_disconn_req(chan, ECONNRESET);
4643 return;
4644 }
4645
4646 switch (chan->move_role) {
4647 case L2CAP_MOVE_ROLE_RESPONDER:
4648 l2cap_move_done(chan);
4649 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4650 break;
4651 case L2CAP_MOVE_ROLE_INITIATOR:
4652 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4653 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4654 /* Remote has only sent pending or
4655 * success responses, clean up
4656 */
4657 l2cap_move_done(chan);
4658 }
4659
4660 /* Other amp move states imply that the move
4661 * has already aborted
4662 */
4663 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4664 break;
4665 }
4666}
4667
4668static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4669 struct hci_chan *hchan)
4670{
4671 struct l2cap_conf_rsp rsp;
4672
4673 chan->hs_hchan = hchan;
4674 chan->hs_hcon->l2cap_data = chan->conn;
4675
4676 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4677
4678 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4679 int err;
4680
4681 set_default_fcs(chan);
4682
4683 err = l2cap_ertm_init(chan);
4684 if (err < 0)
4685 l2cap_send_disconn_req(chan, -err);
4686 else
4687 l2cap_chan_ready(chan);
4688 }
4689}
4690
/* A logical link for a channel move came up: advance the move state
 * machine according to our role and current state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* If the local side is busy, postpone until it drains;
		 * otherwise confirm (initiator) or respond (responder).
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4724
/* Call with chan locked.
 *
 * Logical link setup completion callback.  A non-zero status aborts
 * whatever create/move was pending and releases the link.  On
 * success, the channel state selects the follow-up: a channel that is
 * not yet BT_CONNECTED is completing creation on an AMP controller,
 * while a connected channel is completing a move.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4745
4746void l2cap_move_start(struct l2cap_chan *chan)
4747{
4748 BT_DBG("chan %p", chan);
4749
4750 if (chan->local_amp_id == AMP_ID_BREDR) {
4751 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4752 return;
4753 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4754 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4755 /* Placeholder - start physical link setup */
4756 } else {
4757 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4758 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4759 chan->move_id = 0;
4760 l2cap_move_setup(chan);
4761 l2cap_send_move_chan_req(chan, 0);
4762 }
4763}
4764
/* Physical link result for a channel being created on an AMP
 * controller.  For an outgoing channel (BT_CONNECT) either continue
 * with a Create Channel request on the AMP or fall back to a plain
 * BR/EDR Connect request.  For an incoming channel, send the pending
 * Create Channel response (using the ident saved in chan->ident) and,
 * on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: move on to configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
4816
4817static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4818 u8 remote_amp_id)
4819{
4820 l2cap_move_setup(chan);
4821 chan->move_id = local_amp_id;
4822 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4823
4824 l2cap_send_move_chan_req(chan, remote_amp_id);
4825}
4826
/* Responder side: the physical link is ready, so try to obtain a
 * logical link and answer the pending Move Channel request.
 *
 * NOTE(review): the hci_chan lookup is still a placeholder, so hchan
 * is always NULL here and this currently always responds "not
 * allowed"; the hchan branches are dead until the placeholder is
 * filled in.  The result parameter is currently unused.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4851
4852static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4853{
4854 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4855 u8 rsp_result;
4856 if (result == -EINVAL)
4857 rsp_result = L2CAP_MR_BAD_ID;
4858 else
4859 rsp_result = L2CAP_MR_NOT_ALLOWED;
4860
4861 l2cap_send_move_chan_rsp(chan, rsp_result);
4862 }
4863
4864 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4865 chan->move_state = L2CAP_MOVE_STABLE;
4866
4867 /* Restart data transmission */
4868 l2cap_ertm_send(chan);
4869}
4870
/* Invoke with locked chan.
 *
 * Physical link (AMP) setup completion.  Dispatches on channel state:
 * a channel that is not yet connected continues creation, a failed
 * result cancels the move, and otherwise the initiator/responder
 * move step is taken.
 *
 * NOTE(review): on the BT_DISCONN/BT_CLOSED path this function
 * unlocks the channel itself before returning, unlike the other
 * paths where the helpers leave the lock for the caller -- verify
 * callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4904
/* Handle an incoming Move Channel Request.  Validates that the
 * channel may be moved (dynamic CID, ERTM/streaming mode, channel
 * policy, valid target controller), detects move collisions, and
 * replies with success, pending or an error result.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; every path
 * after the lookup must fall through to the unlock at the bottom.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves are only meaningful when we advertise A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown ICID: reply directly, no channel to unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Save the ident for the deferred responses sent later */
	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-BR/EDR destination must name an AMP controller that is
	 * present and powered up.
	 */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5002
/* Initiator-side handling of a Move Channel Response that carries
 * success or pending.  Advances the move state machine: depending on
 * the current state this sends the confirm immediately, waits for the
 * logical link, or gives up and reverts to the current controller.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; it is
 * unlocked at the bottom.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel is gone; confirm by ICID so the peer can
		 * finish the transaction anyway.
		 */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5092
/* Initiator-side handling of a Move Channel Response that carries an
 * error.  On a collision result we yield and become the responder;
 * otherwise the move is cancelled and the channel stays on its
 * current controller.  Either way an unconfirmed Move Confirm is
 * sent so the peer can complete the transaction.
 *
 * l2cap_get_chan_by_ident() returns the channel locked; it is
 * unlocked at the bottom.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* We lost the collision; peer's move proceeds */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5121
5122static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5123 struct l2cap_cmd_hdr *cmd,
5124 u16 cmd_len, void *data)
5125{
5126 struct l2cap_move_chan_rsp *rsp = data;
5127 u16 icid, result;
5128
5129 if (cmd_len != sizeof(*rsp))
5130 return -EPROTO;
5131
5132 icid = le16_to_cpu(rsp->icid);
5133 result = le16_to_cpu(rsp->result);
5134
5135 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5136
5137 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5138 l2cap_move_continue(conn, icid, result);
5139 else
5140 l2cap_move_fail(conn, cmd->ident, icid, result);
5141
5142 return 0;
5143}
5144
/* Handle a Move Channel Confirmation.  On the responder side this
 * commits (or, for an unconfirmed result, rolls back) the controller
 * switch.  A Confirmation Response is always sent -- the spec
 * requires one even when the ICID is unknown.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is
 * unlocked at the bottom.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Moved back to BR/EDR: the AMP link is obsolete */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5186
/* Handle a Move Channel Confirmation Response -- the final message
 * of a move transaction on the initiator side.  Commits the new
 * controller and releases the now-obsolete logical link when the
 * channel moved back to BR/EDR.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; it is
 * unlocked at the bottom.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5221
/* Handle an LE Connection Parameter Update Request.  Only the
 * peripheral sends this, so we must be master to process it.  The
 * requested parameters are validated, a response is always sent, and
 * on acceptance the update is pushed to the controller and reported
 * to mgmt so userspace can store the peer's preference.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* Respond before applying the update */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5271
/* Handle an LE Credit Based Connection Response for a connection we
 * initiated (matched by cmd->ident).  On success the channel becomes
 * ready; on an authentication/encryption error we raise our security
 * level and retry via SMP; any other result tears the channel down.
 *
 * Lock order: conn->chan_lock is taken first, then the channel lock.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane MTU/MPS (spec minimum
	 * is 23 octets) and a CID in the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already in use by another chan */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5358
/* Dispatch one BR/EDR signaling command to its handler.  A non-zero
 * return makes the caller send a Command Reject for this command.
 * Handlers whose failures should not trigger a reject have their
 * return value deliberately ignored here.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5438
/* Handle an incoming LE Credit Based Connection Request.  Looks up a
 * listening channel for the PSM, checks security and the requested
 * source CID, creates a new channel and responds -- unless setup is
 * deferred to userspace, in which case the response is sent later by
 * the socket layer.
 *
 * Lock order: conn->chan_lock first, then the parent channel lock.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the spec-mandated minimum LE MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan);

	/* Our local CID and receive credits go back in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5565
/* Handle an LE Flow Control Credit packet: add the peer's new
 * credits to the channel, guarding against overflowing the 16-bit
 * credit counter, and resume transmission if we were stalled.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; both exit
 * paths unlock it.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Peer must never push tx_credits past the 16-bit maximum;
	 * treat an overflow as a protocol violation and disconnect.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5611
5612static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5613 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5614 u8 *data)
5615{
5616 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5617 struct l2cap_chan *chan;
5618
5619 if (cmd_len < sizeof(*rej))
5620 return -EPROTO;
5621
5622 mutex_lock(&conn->chan_lock);
5623
5624 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5625 if (!chan)
5626 goto done;
5627
5628 l2cap_chan_lock(chan);
5629 l2cap_chan_del(chan, ECONNREFUSED);
5630 l2cap_chan_unlock(chan);
5631
5632done:
5633 mutex_unlock(&conn->chan_lock);
5634 return 0;
5635}
5636
/* Dispatch one LE signaling command to its handler.  A non-zero
 * return makes the caller send a Command Reject for this command.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5683
/* Process an skb received on the LE signaling channel.  Unlike
 * BR/EDR, an LE signaling PDU carries exactly one command, so the
 * declared length must match the remaining data exactly.  Malformed
 * PDUs are silently dropped; handler failures produce a Command
 * Reject.  The skb is consumed in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): historical message text; err is the
		 * handler's error, not necessarily a link-type issue.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5724
/* Process an skb received on the BR/EDR signaling channel.  A single
 * signaling PDU may contain multiple commands, so iterate over them,
 * rejecting individual commands that their handlers fail.  The raw
 * PDU is also passed to any raw sockets.  The skb is consumed in all
 * cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A command may not claim more payload than remains */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): historical message text; err is
			 * the handler's error, not a link-type issue.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the PDU */
		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5773
5774static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5775{
5776 u16 our_fcs, rcv_fcs;
5777 int hdr_size;
5778
5779 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5780 hdr_size = L2CAP_EXT_HDR_SIZE;
5781 else
5782 hdr_size = L2CAP_ENH_HDR_SIZE;
5783
5784 if (chan->fcs == L2CAP_FCS_CRC16) {
5785 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5786 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5787 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5788
5789 if (our_fcs != rcv_fcs)
5790 return -EBADMSG;
5791 }
5792 return 0;
5793}
5794
/* Acknowledge the peer with the F-bit set after a poll, via RNR (if
 * we are locally busy), a pending I-frame, or an explicit RR.  The
 * CONN_SEND_FBIT flag lets l2cap_ertm_send() piggy-back the F-bit on
 * an outgoing I-frame; if nothing consumed it, send a bare RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer just became un-busy: restart retransmission timer if
	 * we still have unacked frames outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5828
/* Append new_frag to skb's fragment list, keeping *last_frag pointed
 * at the list tail so appends stay O(1), and account the fragment in
 * skb's len/data_len/truesize.
 *
 * NOTE(review): on the very first append *last_frag is the sdu skb
 * itself, so (*last_frag)->next writes skb->next -- presumably
 * harmless because the partial sdu is not on any queue; verify.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5847
/* Reassemble an SDU from ERTM/streaming I-frames according to the
 * SAR bits in control.
 *
 * Takes ownership of skb: it is consumed by recv(), linked into the
 * partial SDU (chan->sdu), or freed on error.  Returns 0 on success
 * (including "partial SDU stored, more fragments expected"),
 * -EMSGSIZE when the announced SDU length exceeds our MTU, -EINVAL
 * on a SAR sequence violation (e.g. START while an SDU is already in
 * progress, or an END whose total length does not match), or the
 * error from chan->ops->recv().  Any error discards the partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while reassembling: protocol error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* START while already reassembling: protocol error */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START fragment may not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE may not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must exactly match the announced size */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op for the consumed cases */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5932
/* Resegment queued frames after a channel move changes the MPS.
 * Still a placeholder: nothing to do yet, always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5938
5939void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5940{
5941 u8 event;
5942
5943 if (chan->mode != L2CAP_MODE_ERTM)
5944 return;
5945
5946 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5947 l2cap_tx(chan, NULL, NULL, event);
5948}
5949
/* Deliver in-sequence frames parked on the SREJ queue.
 *
 * Pass sequential frames to l2cap_reassemble_sdu()
 * until a gap is encountered.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Stop at the first missing sequence number */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		/* reassemble_sdu takes ownership of skb */
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* All gaps filled: leave SREJ recovery and acknowledge */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5983
/* Handle a received SREJ (selective reject) S-frame: retransmit the
 * single frame identified by reqseq, honouring the retry limit and the
 * poll/final handshake.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next, not-yet-sent frame is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* A poll demands a final bit in our response */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this final bit already
			 * answered an SREJ exchange for the same sequence
			 * number.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6041
/* Handle a received REJ S-frame: retransmit all unacked frames
 * starting at reqseq, honouring the retry limit and the poll/final
 * handshake.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ of the next, not-yet-sent frame is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this final bit was not already
		 * consumed by a pending REJ exchange.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6078
/* Classify a received I-frame's tx sequence number relative to the
 * receive window: expected, duplicate, unexpected (gap), one of the
 * SREJ-related classes while in SREJ_SENT state, or invalid.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6164
/* ERTM receive state machine handler for the default RECV state.
 *
 * The skb is consumed when it is reassembled or parked on the SREJ
 * queue (tracked via skb_in_use); any frame left unused is freed
 * before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* reassemble_sdu owns the skb from here on */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): control is non-NULL for every caller in
		 * this file, so the NULL check below looks redundant --
		 * confirm before removing.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6298
/* ERTM receive state machine handler for the SREJ_SENT state: one or
 * more missing frames are being selectively retransmitted.  Usable
 * I-frames are parked on srej_q until the gap closes; the skb is freed
 * when not queued (tracked via skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Deliver any frames that are now contiguous */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Poll demands a final bit; re-request the tail of
			 * the SREJ list.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge with a plain RR up to buffer_seq */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6441
6442static int l2cap_finish_move(struct l2cap_chan *chan)
6443{
6444 BT_DBG("chan %p", chan);
6445
6446 chan->rx_state = L2CAP_RX_STATE_RECV;
6447
6448 if (chan->hs_hcon)
6449 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6450 else
6451 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6452
6453 return l2cap_resegment(chan);
6454}
6455
/* RX state WAIT_P: waiting for a poll (P=1) S-frame from the remote,
 * e.g. while completing a channel move.  Any frame without the poll
 * bit is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Answer the poll with a final bit */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* Only S-frames are acceptable while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Finish handling the S-frame event in the normal RECV state;
	 * S-frames carry no payload, so no skb is passed along.
	 */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6493
/* RX state WAIT_F: waiting for a final (F=1) frame acknowledging our
 * poll, e.g. after a channel move.  On receipt, the transmit state is
 * rewound to what the remote acknowledged and normal RECV processing
 * resumes with this frame.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the link the channel now runs over */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6531
6532static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6533{
6534 /* Make sure reqseq is for a packet that has been sent but not acked */
6535 u16 unacked;
6536
6537 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6538 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6539}
6540
6541static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6542 struct sk_buff *skb, u8 event)
6543{
6544 int err = 0;
6545
6546 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6547 control, skb, event, chan->rx_state);
6548
6549 if (__valid_reqseq(chan, control->reqseq)) {
6550 switch (chan->rx_state) {
6551 case L2CAP_RX_STATE_RECV:
6552 err = l2cap_rx_state_recv(chan, control, skb, event);
6553 break;
6554 case L2CAP_RX_STATE_SREJ_SENT:
6555 err = l2cap_rx_state_srej_sent(chan, control, skb,
6556 event);
6557 break;
6558 case L2CAP_RX_STATE_WAIT_P:
6559 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6560 break;
6561 case L2CAP_RX_STATE_WAIT_F:
6562 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6563 break;
6564 default:
6565 /* shut it down */
6566 break;
6567 }
6568 } else {
6569 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6570 control->reqseq, chan->next_tx_seq,
6571 chan->expected_ack_seq);
6572 l2cap_send_disconn_req(chan, ECONNRESET);
6573 }
6574
6575 return err;
6576}
6577
/* Streaming-mode receive: deliver in-sequence frames, silently drop
 * everything else and resynchronize on the received txseq (streaming
 * mode has no retransmission).  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: discard any partial SDU and
		 * the frame itself.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync: treat the received txseq as acknowledged and expect
	 * its successor next.
	 */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
6613
/* Receive one ERTM or streaming-mode PDU on @chan.
 *
 * Unpacks the control field, checks the FCS and the payload length
 * against the negotiated MPS, then dispatches I-frames to the RX state
 * machine (or streaming receiver) and S-frames to the matching RX
 * event.  Always returns 0; bad frames are dropped and protocol
 * violations disconnect the channel.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU-length header and FCS trailer from the
	 * payload length before comparing against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Apply any attached socket filter before further processing */
	if ((chan->mode == L2CAP_MODE_ERTM ||
	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
		goto drop;

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values to RX events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6705
/* Top up the remote's LE flow-control credits.
 *
 * The target credit level is (imtu / mps) + 1 -- enough for one
 * maximum-size SDU; only the shortfall from that level is granted.
 *
 * NOTE(review): assumes rx_credits never exceeds the target level;
 * otherwise the u16 subtraction would wrap -- confirm callers.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;

	/* Remote already holds enough credits */
	if (!return_credits)
		return;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6728
6729static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6730{
6731 int err;
6732
6733 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6734
6735 /* Wait recv to confirm reception before updating the credits */
6736 err = chan->ops->recv(chan, skb);
6737
6738 /* Update credits whenever an SDU is received */
6739 l2cap_chan_le_send_credits(chan);
6740
6741 return err;
6742}
6743
/* Receive handler for LE credit-based (LE_FLOWCTL) channels.
 *
 * Accounts one credit per PDU, reassembles SDUs from the 2-byte
 * SDU-length header carried by the first PDU, and hands complete SDUs
 * to l2cap_le_recv().  Returns 0 once the skb has been consumed or
 * freed internally; a negative return tells the caller the skb is
 * still the caller's to free.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* A PDU without an available credit is a protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_le_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_le_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb(NULL) is a no-op, so this handles both the
		 * "skb consumed" and "skb still held" paths.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6847
/* Dispatch a PDU received on a dynamic (or A2MP) CID to the owning
 * channel's mode-specific receive path.  Consumes @skb in all cases.
 *
 * NOTE(review): l2cap_get_chan_by_scid appears to return the channel
 * locked (it is unlocked at "done") -- confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* A2MP traffic may create its channel on demand */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6921
/* Deliver a connectionless PDU to the raw channel bound to @psm,
 * tagging it with the sender's address for msg_name.  Consumes @skb in
 * all paths and drops the channel reference taken by the lookup.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only handled on BR/EDR links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* On success recv() consumed the skb; just drop our reference */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6958
/* Demultiplex a complete L2CAP frame by CID.
 *
 * Frames arriving before the HCI link is fully connected are parked on
 * conn->pending_rx and replayed later by process_pending_rx().
 * Consumes @skb in all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header's length field must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7013
7014static void process_pending_rx(struct work_struct *work)
7015{
7016 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7017 pending_rx_work);
7018 struct sk_buff *skb;
7019
7020 BT_DBG("");
7021
7022 while ((skb = skb_dequeue(&conn->pending_rx)))
7023 l2cap_recv_frame(conn, skb);
7024}
7025
/* Create (or return the existing) L2CAP connection state for an HCI
 * connection, including its HCI channel, MTU, fixed-channel mask and
 * the work items used for deferred RX and identity-address updates.
 *
 * Returns NULL if the HCI channel or the l2cap_conn allocation fails.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by a previous caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold a reference on the HCI connection for the conn lifetime */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise A2MP only when high-speed support is enabled */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* Advertise SMP over BR/EDR when secure connections are usable
	 * (or debugging forces it on).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7092
7093static bool is_valid_psm(u16 psm, u8 dst_type) {
7094 if (!psm)
7095 return false;
7096
7097 if (bdaddr_type_is_le(dst_type))
7098 return (psm <= 0x00ff);
7099
7100 /* PSM must be odd and lsb of upper byte must be 0 */
7101 return ((psm & 0x0101) == 0x0001);
7102}
7103
/* Initiate an outgoing L2CAP connection for @chan.
 *
 * @chan:     channel to connect; must be in BT_OPEN or BT_BOUND state
 * @psm:      destination PSM (little endian), needed for
 *            connection-oriented channels
 * @cid:      destination CID, needed for fixed channels
 * @dst:      remote device address
 * @dst_type: remote address type (BDADDR_BREDR / BDADDR_LE_*)
 *
 * Looks up a suitable controller, creates (or reuses) the underlying
 * HCI connection and attaches the channel to it.
 *
 * Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	/* Pick the local controller that can reach @dst; takes a
	 * reference on hdev which is put on exit.
	 */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels are exempt from PSM/CID validation */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels are identified by a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... while fixed channels require a destination CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming can be disabled via module parameter */
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect as slave; otherwise a
		 * scan-based (master) connection is initiated.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Reject a duplicate fixed channel on the same connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick the channel state machine
	 * immediately instead of waiting for the connect callback.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7265
7266/* ---- L2CAP interface with lower layer (HCI) ---- */
7267
7268int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7269{
7270 int exact = 0, lm1 = 0, lm2 = 0;
7271 struct l2cap_chan *c;
7272
7273 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7274
7275 /* Find listening sockets and check their link_mode */
7276 read_lock(&chan_list_lock);
7277 list_for_each_entry(c, &chan_list, global_l) {
7278 if (c->state != BT_LISTEN)
7279 continue;
7280
7281 if (!bacmp(&c->src, &hdev->bdaddr)) {
7282 lm1 |= HCI_LM_ACCEPT;
7283 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7284 lm1 |= HCI_LM_MASTER;
7285 exact++;
7286 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7287 lm2 |= HCI_LM_ACCEPT;
7288 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7289 lm2 |= HCI_LM_MASTER;
7290 }
7291 }
7292 read_unlock(&chan_list_lock);
7293
7294 return exact ? lm1 : lm2;
7295}
7296
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * Returns the matching channel with a reference held (the caller must
 * drop it with l2cap_chan_put()), or NULL when the list is exhausted.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to this controller's address or be a
		 * wildcard (BDADDR_ANY) listener
		 */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Hold the channel so it stays valid after the list
		 * lock is released
		 */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7332
/* HCI connect-complete callback: set up the L2CAP connection on a new
 * link and spawn per-connection instances of all listening fixed
 * channels.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* Non-zero HCI status means the link setup failed */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Clone the listening channel for this connection */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the successor before dropping the reference
		 * l2cap_global_fixed_chan() gave us on pchan
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7393
7394int l2cap_disconn_ind(struct hci_conn *hcon)
7395{
7396 struct l2cap_conn *conn = hcon->l2cap_data;
7397
7398 BT_DBG("hcon %p", hcon);
7399
7400 if (!conn)
7401 return HCI_ERROR_REMOTE_USER_TERM;
7402 return conn->disc_reason;
7403}
7404
7405static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7406{
7407 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7408 return;
7409
7410 BT_DBG("hcon %p reason %d", hcon, reason);
7411
7412 l2cap_conn_del(hcon, bt_to_errno(reason));
7413}
7414
7415static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7416{
7417 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7418 return;
7419
7420 if (encrypt == 0x00) {
7421 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7422 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7423 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7424 chan->sec_level == BT_SECURITY_FIPS)
7425 l2cap_chan_close(chan, ECONNREFUSED);
7426 } else {
7427 if (chan->sec_level == BT_SECURITY_MEDIUM)
7428 __clear_chan_timer(chan);
7429 }
7430}
7431
/* HCI security event callback: authentication/encryption changed on
 * @hcon.  Walk every channel on the connection and advance or abort
 * its state machine according to @status (0 = success).
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP signalling is not subject to link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* On success, inherit the (possibly raised) link level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels with an outstanding connect request */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume pending traffic and
		 * re-evaluate the encryption timer
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			/* Incoming connection waiting for security: send
			 * the deferred connect response now
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: tear the channel down */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Connection accepted: kick off configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7522
/* Entry point for ACL data from the HCI core.  Reassembles fragmented
 * L2CAP frames (ACL_START followed by ACL_CONT fragments) into a
 * single skb and hands each complete frame to l2cap_recv_frame(),
 * which consumes it.  Always takes ownership of @skb.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is still pending
		 * means the previous frame was truncated: discard it
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still expected from continuation fragments */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	/* Fragment data was copied (or the frame was bad): free @skb */
	kfree_skb(skb);
}
7626
/* Callbacks registered with the HCI core; invoked on connection
 * establishment, disconnection and security (auth/encrypt) events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7633
7634static int l2cap_debugfs_show(struct seq_file *f, void *p)
7635{
7636 struct l2cap_chan *c;
7637
7638 read_lock(&chan_list_lock);
7639
7640 list_for_each_entry(c, &chan_list, global_l) {
7641 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7642 &c->src, c->src_type, &c->dst, c->dst_type,
7643 c->state, __le16_to_cpu(c->psm),
7644 c->scid, c->dcid, c->imtu, c->omtu,
7645 c->sec_level, c->mode);
7646 }
7647
7648 read_unlock(&chan_list_lock);
7649
7650 return 0;
7651}
7652
/* Generates l2cap_debugfs_fops around l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created by l2cap_init(); NULL if debugfs unavailable */
static struct dentry *l2cap_debugfs;
7656
7657int __init l2cap_init(void)
7658{
7659 int err;
7660
7661 err = l2cap_init_sockets();
7662 if (err < 0)
7663 return err;
7664
7665 hci_register_cb(&l2cap_cb);
7666
7667 if (IS_ERR_OR_NULL(bt_debugfs))
7668 return 0;
7669
7670 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7671 NULL, &l2cap_debugfs_fops);
7672
7673 return 0;
7674}
7675
/* Subsystem teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7682
/* Runtime opt-out of ERTM/streaming modes (checked in chan->mode
 * validation, e.g. in l2cap_chan_connect())
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");