Linux kernel mirror (for testing)
Upstream: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/debugfs.h>
34#include <linux/crc16.h>
35#include <linux/filter.h>
36
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
39#include <net/bluetooth/l2cap.h>
40
41#include "smp.h"
42#include "a2mp.h"
43#include "amp.h"
44
/* Upper bound on credits grantable on an LE flow-control channel
 * (the credit count on the wire is a 16-bit field).
 */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* When true, Enhanced Retransmission Mode is not offered/used.
 * NOTE(review): appears to be set externally (module parameter or
 * debugfs) - the writer is not visible in this chunk.
 */
bool disable_ertm;

/* Feature mask advertised in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* All L2CAP channels in the system, guarded by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
53
54static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 u8 code, u8 ident, u16 dlen, void *data);
56static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 void *data);
58static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
60
61static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 struct sk_buff_head *skbs, u8 event);
63
64static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
65{
66 if (link_type == LE_LINK) {
67 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 return BDADDR_LE_PUBLIC;
69 else
70 return BDADDR_LE_RANDOM;
71 }
72
73 return BDADDR_BREDR;
74}
75
/* Address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
80
/* Address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
85
86/* ---- L2CAP channels ---- */
87
88static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
89 u16 cid)
90{
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->dcid == cid)
95 return c;
96 }
97 return NULL;
98}
99
100static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
101 u16 cid)
102{
103 struct l2cap_chan *c;
104
105 list_for_each_entry(c, &conn->chan_l, list) {
106 if (c->scid == cid)
107 return c;
108 }
109 return NULL;
110}
111
112/* Find channel with given SCID.
113 * Returns locked channel. */
114static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
115 u16 cid)
116{
117 struct l2cap_chan *c;
118
119 mutex_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 l2cap_chan_lock(c);
123 mutex_unlock(&conn->chan_lock);
124
125 return c;
126}
127
128/* Find channel with given DCID.
129 * Returns locked channel.
130 */
131static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
132 u16 cid)
133{
134 struct l2cap_chan *c;
135
136 mutex_lock(&conn->chan_lock);
137 c = __l2cap_get_chan_by_dcid(conn, cid);
138 if (c)
139 l2cap_chan_lock(c);
140 mutex_unlock(&conn->chan_lock);
141
142 return c;
143}
144
145static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 u8 ident)
147{
148 struct l2cap_chan *c;
149
150 list_for_each_entry(c, &conn->chan_l, list) {
151 if (c->ident == ident)
152 return c;
153 }
154 return NULL;
155}
156
157static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
158 u8 ident)
159{
160 struct l2cap_chan *c;
161
162 mutex_lock(&conn->chan_lock);
163 c = __l2cap_get_chan_by_ident(conn, ident);
164 if (c)
165 l2cap_chan_lock(c);
166 mutex_unlock(&conn->chan_lock);
167
168 return c;
169}
170
171static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
172 u8 src_type)
173{
174 struct l2cap_chan *c;
175
176 list_for_each_entry(c, &chan_list, global_l) {
177 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
178 continue;
179
180 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
181 continue;
182
183 if (c->sport == psm && !bacmp(&c->src, src))
184 return c;
185 }
186 return NULL;
187}
188
/* Bind @chan to a PSM on source address @src.
 *
 * A non-zero @psm is used verbatim after checking that no other
 * channel on the same transport already owns it. A zero @psm asks for
 * automatic allocation from the dynamic range: BR/EDR PSMs are probed
 * in steps of two (only every other value is a valid PSM), LE dynamic
 * PSMs use every value in their range.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken,
 * -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
233
234int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
235{
236 write_lock(&chan_list_lock);
237
238 /* Override the defaults (which are for conn-oriented) */
239 chan->omtu = L2CAP_DEFAULT_MTU;
240 chan->chan_type = L2CAP_CHAN_FIXED;
241
242 chan->scid = scid;
243
244 write_unlock(&chan_list_lock);
245
246 return 0;
247}
248
249static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
250{
251 u16 cid, dyn_end;
252
253 if (conn->hcon->type == LE_LINK)
254 dyn_end = L2CAP_CID_LE_DYN_END;
255 else
256 dyn_end = L2CAP_CID_DYN_END;
257
258 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
259 if (!__l2cap_get_chan_by_scid(conn, cid))
260 return cid;
261 }
262
263 return 0;
264}
265
/* Move @chan to @state and notify the owner through the state_change
 * callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
274
275static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
276 int state, int err)
277{
278 chan->state = state;
279 chan->ops->state_change(chan, chan->state, err);
280}
281
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
286
/* Arm the ERTM retransmission timer.
 *
 * NOTE(review): the pending check is deliberately on the *monitor*
 * timer - while the monitor timer is running the retransmission timer
 * is not (re)armed (mirrors __set_monitor_timer() cancelling the
 * retransmission timer).  Confirm against the ERTM spec before
 * changing this.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
295
/* Arm the ERTM monitor timer, first cancelling any pending
 * retransmission timer - only one of the two runs at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
304
305static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
306 u16 seq)
307{
308 struct sk_buff *skb;
309
310 skb_queue_walk(head, skb) {
311 if (bt_cb(skb)->l2cap.txseq == seq)
312 return skb;
313 }
314
315 return NULL;
316}
317
318/* ---- L2CAP sequence number lists ---- */
319
320/* For ERTM, ordered lists of sequence numbers must be tracked for
321 * SREJ requests that are received and for frames that are to be
322 * retransmitted. These seq_list functions implement a singly-linked
323 * list in an array, where membership in the list can also be checked
324 * in constant time. Items can also be added to the tail of the list
325 * and removed from the head in constant time, without further memory
326 * allocs or frees.
327 */
328
329static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
330{
331 size_t alloc_size, i;
332
333 /* Allocated size is a power of 2 to map sequence numbers
334 * (which may be up to 14 bits) in to a smaller array that is
335 * sized for the negotiated ERTM transmit windows.
336 */
337 alloc_size = roundup_pow_of_two(size);
338
339 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
340 if (!seq_list->list)
341 return -ENOMEM;
342
343 seq_list->mask = alloc_size - 1;
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 for (i = 0; i < alloc_size; i++)
347 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
348
349 return 0;
350}
351
/* Release the list's backing array (kfree(NULL) is a no-op). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
356
/* Constant-time membership test: any slot value other than the CLEAR
 * sentinel means @seq is currently linked into the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
363
/* Remove and return the sequence number at the head of the list.
 *
 * The head's slot is relinked to the following entry and cleared;
 * when the popped slot held the TAIL sentinel, the list becomes
 * empty (head and tail reset to CLEAR).
 *
 * NOTE(review): assumes the list is non-empty - popping an empty list
 * would index the array with the CLEAR sentinel.  Callers must check
 * first.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
379
/* Empty the list, resetting every slot to the CLEAR sentinel. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* Already empty - nothing to reset */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
393
/* Append @seq at the tail of the list in constant time.
 *
 * Duplicate appends are ignored (the slot already links somewhere).
 * The tail's slot always holds the TAIL sentinel; appending rewrites
 * the previous tail's slot to point at the new entry.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
411
/* Delayed-work handler for the channel timer.
 *
 * Derives the error reported to the owner from the state the channel
 * was stuck in, closes the channel, then drops a channel reference
 * (presumably taken when the timer was armed - see l2cap_set_timer;
 * not visible in this chunk).
 * Lock order: conn->chan_lock before the channel lock, matching the
 * rest of this file.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* close() is called without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
441
442struct l2cap_chan *l2cap_chan_create(void)
443{
444 struct l2cap_chan *chan;
445
446 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
447 if (!chan)
448 return NULL;
449
450 mutex_init(&chan->lock);
451
452 /* Set default lock nesting level */
453 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
454
455 write_lock(&chan_list_lock);
456 list_add(&chan->global_l, &chan_list);
457 write_unlock(&chan_list_lock);
458
459 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
460
461 chan->state = BT_OPEN;
462
463 kref_init(&chan->kref);
464
465 /* This flag is cleared in l2cap_chan_ready() */
466 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
467
468 BT_DBG("chan %p", chan);
469
470 return chan;
471}
472EXPORT_SYMBOL_GPL(l2cap_chan_create);
473
/* kref release callback: unlink the channel from the global list and
 * free it.  Reached only via l2cap_chan_put() dropping the last
 * reference.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
486
/* Take an additional reference on @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
493
/* Drop a reference on @c; frees the channel (l2cap_chan_destroy) when
 * the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
501
502void l2cap_chan_set_defaults(struct l2cap_chan *chan)
503{
504 chan->fcs = L2CAP_FCS_CRC16;
505 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
506 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
507 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
508 chan->remote_max_tx = chan->max_tx;
509 chan->remote_tx_win = chan->tx_win;
510 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
511 chan->sec_level = BT_SECURITY_LOW;
512 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
514 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
515 chan->conf_state = 0;
516
517 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
518}
519EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
520
/* Reset LE credit-based flow-control state on @chan.
 *
 * @tx_credits: initial credits granted to us by the peer.
 *
 * NOTE(review): assumes chan->conn->mtu > L2CAP_HDR_SIZE and that the
 * resulting mps is non-zero, otherwise the rx_credits division is
 * undefined - confirm callers guarantee this.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
534
/* Wire @chan into @conn and assign CIDs/MTU according to the channel
 * type.  Caller must hold conn->chan_lock (l2cap_chan_add() is the
 * locked wrapper).  Takes a channel reference and - except for fixed
 * channels without FLAG_HOLD_HCI_CONN - an hci_conn reference.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort flow-spec parameters */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
586
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
593
/* Detach @chan from its connection and tear down per-mode state.
 * @err is reported to the owner through ops->teardown.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drops the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	/* Tear down any high-speed (AMP) logical link */
	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* If configuration never completed, the per-mode queues and
	 * sequence lists below were never set up.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
664
/* Work handler: after an identity-address update on the underlying
 * hci_conn, refresh the cached destination address and type of every
 * channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
683
684static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
685{
686 struct l2cap_conn *conn = chan->conn;
687 struct l2cap_le_conn_rsp rsp;
688 u16 result;
689
690 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
691 result = L2CAP_CR_LE_AUTHORIZATION;
692 else
693 result = L2CAP_CR_LE_BAD_PSM;
694
695 l2cap_state_change(chan, BT_DISCONN);
696
697 rsp.dcid = cpu_to_le16(chan->scid);
698 rsp.mtu = cpu_to_le16(chan->imtu);
699 rsp.mps = cpu_to_le16(chan->mps);
700 rsp.credits = cpu_to_le16(chan->rx_credits);
701 rsp.result = cpu_to_le16(result);
702
703 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
704 &rsp);
705}
706
707static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
708{
709 struct l2cap_conn *conn = chan->conn;
710 struct l2cap_conn_rsp rsp;
711 u16 result;
712
713 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
714 result = L2CAP_CR_SEC_BLOCK;
715 else
716 result = L2CAP_CR_BAD_PSM;
717
718 l2cap_state_change(chan, BT_DISCONN);
719
720 rsp.scid = cpu_to_le16(chan->dcid);
721 rsp.dcid = cpu_to_le16(chan->scid);
722 rsp.result = cpu_to_le16(result);
723 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
724
725 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
726}
727
/* Close @chan with @reason, taking the action appropriate for its
 * current state: request disconnection from the peer, reject a
 * pending incoming connect, or tear the channel down directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Ask the peer to disconnect; the channel timer
			 * bounds how long we wait for its response.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connect not answered yet: reject it first */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
770
/* Map the channel type/PSM and requested security level to an HCI
 * authentication requirement (HCI_AT_*).
 *
 * Side effect: channels on SDP-style PSMs (3DSP for connectionless,
 * SDP for connection-oriented) get their sec_level bumped from LOW to
 * SDP before the mapping is done.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP conn-oriented channels use the default map */
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
821
/* Service level security */
/* Raise the link security to @chan's requested level.
 * LE links are delegated to SMP; BR/EDR links first map the channel
 * to an HCI authentication requirement.  The return value comes
 * straight from smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
836
837static u8 l2cap_get_ident(struct l2cap_conn *conn)
838{
839 u8 id;
840
841 /* Get next available identificator.
842 * 1 - 128 are used by kernel.
843 * 129 - 199 are reserved.
844 * 200 - 254 are used by utilities like l2ping, etc.
845 */
846
847 mutex_lock(&conn->ident_lock);
848
849 if (++conn->tx_ident > 128)
850 conn->tx_ident = 1;
851
852 id = conn->tx_ident;
853
854 mutex_unlock(&conn->ident_lock);
855
856 return id;
857}
858
/* Build and transmit one signalling command on @conn.  Silently does
 * nothing when the command skb cannot be built.  Signalling frames
 * are marked force-active and sent at maximum priority.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
883
884static bool __chan_is_moving(struct l2cap_chan *chan)
885{
886 return chan->move_state != L2CAP_MOVE_STABLE &&
887 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
888}
889
/* Transmit one frame for @chan.
 *
 * Channels moved to an AMP controller (hs_hcon set) and not mid-move
 * send on the high-speed hci_chan - or drop the frame if that channel
 * is gone.  Everything else goes out on the ACL link, with NO_FLUSH
 * for LE links and for flush-capable BR/EDR links unless the channel
 * explicitly asked to be flushable (FLAG_FLUSHABLE).
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
921
922static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
923{
924 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
925 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
926
927 if (enh & L2CAP_CTRL_FRAME_TYPE) {
928 /* S-Frame */
929 control->sframe = 1;
930 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
931 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
932
933 control->sar = 0;
934 control->txseq = 0;
935 } else {
936 /* I-Frame */
937 control->sframe = 0;
938 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
939 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
940
941 control->poll = 0;
942 control->super = 0;
943 }
944}
945
946static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
947{
948 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
949 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
950
951 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
952 /* S-Frame */
953 control->sframe = 1;
954 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
955 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
956
957 control->sar = 0;
958 control->txseq = 0;
959 } else {
960 /* I-Frame */
961 control->sframe = 0;
962 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
963 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
964
965 control->poll = 0;
966 control->super = 0;
967 }
968}
969
/* Strip the control field (enhanced or extended, per FLAG_EXT_CTRL)
 * from the front of @skb and decode it into the skb's control block.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
983
984static u32 __pack_extended_control(struct l2cap_ctrl *control)
985{
986 u32 packed;
987
988 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
989 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
990
991 if (control->sframe) {
992 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
993 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
994 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
995 } else {
996 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
997 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
998 }
999
1000 return packed;
1001}
1002
1003static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1004{
1005 u16 packed;
1006
1007 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1008 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1009
1010 if (control->sframe) {
1011 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1012 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1013 packed |= L2CAP_CTRL_FRAME_TYPE;
1014 } else {
1015 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1016 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1017 }
1018
1019 return packed;
1020}
1021
/* Write @control into the control-field slot of an already-built
 * frame, immediately after the basic L2CAP header.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1034
1035static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1036{
1037 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1038 return L2CAP_EXT_HDR_SIZE;
1039 else
1040 return L2CAP_ENH_HDR_SIZE;
1041}
1042
/* Build a complete S-frame PDU around the pre-packed @control field:
 * basic L2CAP header, enhanced/extended control field and, when CRC16
 * FCS is enabled, a trailing checksum over header + control.
 * Returns ERR_PTR(-ENOMEM) when the skb cannot be allocated.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* Length field excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1075
/* Validate, pack and transmit one supervisory (S) frame described by
 * @control, updating ack/RNR bookkeeping along the way.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Nothing is sent while an AMP move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* Piggy-back a pending F-bit on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the peer currently sees us as busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame except SREJ acknowledges up to reqseq, so the
	 * delayed ack can be cancelled.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1116
1117static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1118{
1119 struct l2cap_ctrl control;
1120
1121 BT_DBG("chan %p, poll %d", chan, poll);
1122
1123 memset(&control, 0, sizeof(control));
1124 control.sframe = 1;
1125 control.poll = poll;
1126
1127 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1128 control.super = L2CAP_SUPER_RNR;
1129 else
1130 control.super = L2CAP_SUPER_RR;
1131
1132 control.reqseq = chan->buffer_seq;
1133 l2cap_send_sframe(chan, &control);
1134}
1135
1136static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1137{
1138 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1139 return true;
1140
1141 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1142}
1143
/* Decide whether @chan may be carried over an AMP controller: both
 * ends must advertise the A2MP fixed channel, at least one powered-up
 * non-BR/EDR (AMP) controller must exist, and the channel policy must
 * prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1171
/* Validate Extended Flow Specification parameters for @chan.
 * NOTE(review): currently a stub - always accepts.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1177
/* Send an L2CAP Connect Request for @chan and mark the connect as
 * pending until the matching response arrives. The allocated command
 * ident is stored in chan->ident so the response can be matched.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1192
/* Send an L2CAP Create Channel Request, the AMP variant of a Connect
 * Request that additionally names the destination controller.
 * @amp_id: remote AMP controller id the channel should be created on.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1205
/* Prepare an ERTM channel for a channel move: stop all timers, reset
 * retransmission state and receive-side queues, and park the tx/rx
 * state machines until the move completes. No-op for non-ERTM modes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset per-frame retry counters on the already-sent prefix of
	 * the tx queue; unsent frames (retries == 0) terminate the walk.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has finished */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1240
/* Complete a channel move: return the channel to stable state and, for
 * ERTM, resynchronize with the peer - the move initiator polls the
 * remote (and waits for the F-bit), the responder waits for that poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1262
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ops->ready() callback. Idempotent: returns early if the channel was
 * already flagged connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* LE flow control: block sending until the peer grants credits */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1283
/* Send an LE Credit Based Connection Request for @chan. Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request goes out at most once per
 * channel lifetime.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU if the owner didn't configure one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1308
/* Bring an LE channel up once link security permits: fixed channels
 * (no PSM) become ready immediately, while connection-oriented ones in
 * BT_CONNECT proceed with the credit-based connect request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
1324
/* Start the transport-appropriate connect procedure for @chan:
 * AMP discovery when AMP is usable and preferred, the LE credit-based
 * flow for LE links, or a classic L2CAP Connect Request otherwise.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1336
/* Send the initial Information Request (feature mask) for a BR/EDR
 * connection, at most once per connection, and arm the info timer so
 * an unresponsive peer cannot stall channel setup forever.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1354
/* Return true when the link either is not encrypted or its encryption
 * key meets the controller's configured minimum size.
 */
static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
}
1369
/* Drive channel setup for @chan. LE links use their own start path;
 * BR/EDR channels first need the feature-mask exchange to complete,
 * then a security check, and finally a key-size check - a too-small
 * encryption key aborts the channel via the disconnect timer.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature exchange not started yet: kick it off and wait; the
	 * info response handler restarts pending channels.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1396
1397static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1398{
1399 u32 local_feat_mask = l2cap_feat_mask;
1400 if (!disable_ertm)
1401 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1402
1403 switch (mode) {
1404 case L2CAP_MODE_ERTM:
1405 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1406 case L2CAP_MODE_STREAMING:
1407 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1408 default:
1409 return 0x00;
1410 }
1411}
1412
/* Send an L2CAP Disconnect Request for @chan and move it to BT_DISCONN
 * with @err recorded for the owner. ERTM timers are stopped first so
 * no retransmissions race the teardown; A2MP channels have no
 * signalling disconnect and just change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1439
1440/* ---- L2CAP connections ---- */
/* Walk every channel on @conn once the connection is usable (e.g. the
 * feature exchange finished) and advance each one: connectionless
 * channels become ready, BT_CONNECT channels issue their connect
 * request (after security/mode/key-size checks), and BT_CONNECT2
 * channels answer the peer's pending Connect Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels whose required mode the peer
			 * does not support and that may not fall back
			 * to basic mode (state 2 device).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must accept/reject; answer
					 * "pending - authorization" for now.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response is followed by the
			 * first Configure Request, and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1520
/* Post-connect handling specific to LE links: elevate security for
 * outgoing pairings and, as slave, request a connection parameter
 * update if the master chose an interval outside our configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1553
/* Called when the underlying link is fully established: start the
 * BR/EDR feature exchange, advance every attached channel, run the
 * LE-specific post-connect work, and drain any frames that were
 * queued while the connection was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels have their own setup path */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels only become ready once the
			 * feature exchange has finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the connection became ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1594
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliable delivery get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1611
/* Info-request timer expiry: give up waiting for the peer's feature
 * mask response, mark the exchange done anyway, and let pending
 * channels proceed with whatever features are known.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1622
1623/*
1624 * l2cap_user
1625 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1626 * callback is called during registration. The ->remove callback is called
1627 * during unregistration.
1628 * An l2cap_user object can either be explicitly unregistered or when the
1629 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1630 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1631 * External modules must own a reference to the l2cap_conn object if they intend
1632 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1633 * any time if they don't.
1634 */
1635
/* Register an external l2cap_user on @conn and invoke its ->probe()
 * callback. Returns 0 on success, -EINVAL if @user is already on a
 * list, -ENODEV if the connection is already torn down, or the probe
 * callback's error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1672EXPORT_SYMBOL(l2cap_register_user);
1673
/* Unregister a previously registered l2cap_user and invoke its
 * ->remove() callback. Safe to call if the user was already removed
 * (e.g. by connection teardown); the empty-list check makes it a
 * no-op in that case. Serialized by the hci_dev lock, matching
 * l2cap_register_user().
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
1689EXPORT_SYMBOL(l2cap_unregister_user);
1690
/* Remove every registered l2cap_user from @conn, invoking each
 * ->remove() callback. Re-fetches the list head each iteration since
 * a remove callback may itself modify the list. Caller must hold the
 * locks described in l2cap_register_user().
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1701
/* Tear down the L2CAP connection attached to @hcon: stop deferred
 * work, detach external users, close every channel with @err, release
 * the HCI channel and drop the connection reference. Also forces the
 * underlying link to drop immediately (disc_timeout = 0).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop a partially reassembled frame, if any */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel survives until ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Clearing hchan marks the conn dead for l2cap_register_user() */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1757
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it. Invoked by l2cap_conn_put() when the last
 * reference goes away.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1765
/* Take an additional reference on @conn; returns @conn for chaining */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1772
/* Drop a reference on @conn; frees it via l2cap_conn_free() on zero */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1778
1779/* ---- Socket interface ---- */
1780
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact src/dst address match wins
 * immediately; otherwise the last wildcard (BDADDR_ANY) candidate
 * seen is returned. The returned channel has its refcount elevated
 * via l2cap_chan_hold(); caller must release it. NULL if no match.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Filter by transport: BR/EDR sources for ACL links,
		 * non-BR/EDR sources for LE links.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1832
/* ERTM monitor timer expiry: feed a monitor-timeout event into the tx
 * state machine (typically triggering another poll of the remote).
 * The timer held a channel reference, released here; if the channel
 * was meanwhile detached from its connection, just clean up.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1853
/* ERTM retransmission timer expiry: feed a retransmission-timeout
 * event into the tx state machine. Mirrors l2cap_monitor_timeout(),
 * including dropping the reference the timer held on the channel.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1873
/* Transmit all queued I-frames in streaming mode: append @skbs to the
 * tx queue, then number, pack, checksum (optional FCS) and send every
 * frame immediately. Streaming mode has no retransmission, so frames
 * are dequeued and sent in one pass. Suppressed while the channel is
 * being moved to another controller.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* No acknowledgements in streaming mode */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1912
/* Transmit as many pending I-frames as the remote's tx window allows
 * in ERTM mode. Each frame is numbered, packed, FCS-protected if
 * configured, then cloned for transmission while the original stays
 * in tx_q for potential retransmission. Returns the number of frames
 * sent, 0 when blocked (remote busy, channel moving, window full),
 * or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL once the queue tail is sent */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1982
/* Retransmit every sequence number currently queued on retrans_list.
 * Each frame's control field and FCS are refreshed before resending
 * (reqseq and the F-bit may have changed since the original send).
 * Exceeding max_tx retries aborts the channel with a disconnect
 * request. Suppressed while the remote is busy or the channel moves.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and F-bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2063
/* Retransmit the single frame named by control->reqseq (e.g. in
 * response to a REJ/SREJ from the peer).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2072
/* Retransmit every unacknowledged frame starting at control->reqseq:
 * queue all sent-but-unacked sequence numbers from that point up to
 * (not including) tx_send_head, then resend them. Used e.g. when the
 * peer signals it recovered from busy (RR with P-bit).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the peer requires a final response on the resend */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Collect every already-sent frame from there on */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2106
/* Acknowledge received I-frames. Sends RNR when locally busy;
 * otherwise tries to piggyback the ack on outgoing I-frames, sends an
 * explicit RR once the unacked window reaches 3/4 of ack_win, and
 * falls back to arming the ack timer for a delayed ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Ack remaining frames later via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2156
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into skb's linear area (headers were already written by
 * the caller), the remainder into newly allocated fragments chained
 * on skb's frag_list, each at most conn->mtu bytes. Returns the
 * number of bytes copied, -EFAULT on a failed copy, or the allocator
 * error for a failed fragment allocation.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment data in the parent skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2200
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed
 * by the 2-byte PSM, then the user payload from @msg. Returns the skb
 * or an ERR_PTR from allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is capped by the link MTU; rest goes in frag_list */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2232
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * the user payload from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2262
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * the first segment of a segmented SDU, then payload from @msg. Room
 * for the FCS is reserved via hlen but appended later. Returns the
 * skb or an ERR_PTR.
 * @sdulen: total SDU length for a SAR start fragment, 0 otherwise.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2316
/* Segment an SDU of @len bytes from @msg into I-frame PDUs appended
 * to @seg_queue. The PDU size is derived from the HCI MTU, reduced
 * for L2CAP/FCS overhead and the remote's MPS. A fitting SDU becomes
 * one UNSEGMENTED frame; otherwise frames are tagged START /
 * CONTINUE / END, with the total SDU length only in the START frame.
 * Returns 0 on success or a negative error (queue purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START frame carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2383
/* Build a single LE flow-control (K-frame) PDU from user data.
 *
 * @chan:   channel the PDU belongs to
 * @msg:    user data source
 * @len:    payload bytes to copy into this PDU
 * @sdulen: total SDU length for the first PDU of a segmented SDU,
 *          or 0 for subsequent PDUs (no SDU-length field emitted)
 *
 * Returns the skb or an ERR_PTR() on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* First K-frame of a segmented SDU also carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers payload plus the optional SDU-length field */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2426
/* Segment an outgoing SDU into LE flow-control (K-frame) PDUs.
 *
 * Only the first PDU carries the 2-byte SDU-length field, so after the
 * first iteration the per-PDU payload capacity grows by
 * L2CAP_SDULEN_SIZE.  Returns 0 or a negative errno; on failure any
 * PDUs already built are purged from @seg_queue.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU: reserve room for the SDU-length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* sdu_len also acts as the "first PDU" flag; clear it and
		 * reclaim the SDU-length field's space for later PDUs.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2462
2463static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2464{
2465 int sent = 0;
2466
2467 BT_DBG("chan %p", chan);
2468
2469 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2470 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2471 chan->tx_credits--;
2472 sent++;
2473 }
2474
2475 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2476 skb_queue_len(&chan->tx_q));
2477}
2478
/* Send user data on a channel, dispatching on the channel mode.
 *
 * @chan: channel to send on
 * @msg:  user data
 * @len:  number of bytes to send
 *
 * Returns @len on success (data queued or sent) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting; the state
		 * check overrides any segmentation result.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop accepting data from the caller
		 * until the peer grants more.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2605
/* Send an SREJ S-frame for every missing frame between expected_tx_seq
 * and @txseq, then advance expected_tx_seq past @txseq.  Frames already
 * buffered out-of-order in srej_q are skipped.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only SREJ frames we have not already received */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			/* Remember what is outstanding */
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2628
2629static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2630{
2631 struct l2cap_ctrl control;
2632
2633 BT_DBG("chan %p", chan);
2634
2635 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2636 return;
2637
2638 memset(&control, 0, sizeof(control));
2639 control.sframe = 1;
2640 control.super = L2CAP_SUPER_SREJ;
2641 control.reqseq = chan->srej_list.tail;
2642 l2cap_send_sframe(chan, &control);
2643}
2644
/* Re-send SREJ frames for every sequence number still on the SREJ
 * list, stopping early if @txseq is found.  Each entry is popped and
 * re-appended, so after one full pass the list is back in its original
 * order; the captured initial head bounds the walk to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the entry outstanding: put it back at the tail */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2670
/* Process an acknowledgment (ReqSeq) from the remote: free every
 * transmitted frame up to (but not including) @reqseq from the TX
 * queue and stop the retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* The frame may already have been removed (e.g. freed
		 * earlier), so look it up before unlinking.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All in-flight frames acknowledged: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2702
2703static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2704{
2705 BT_DBG("chan %p", chan);
2706
2707 chan->expected_tx_seq = chan->buffer_seq;
2708 l2cap_seq_list_clear(&chan->srej_list);
2709 skb_queue_purge(&chan->srej_q);
2710 chan->rx_state = L2CAP_RX_STATE_RECV;
2711}
2712
/* ERTM transmit state machine: XMIT state handler.  In this state the
 * channel may transmit new I-frames immediately; poll-type events move
 * it to WAIT_F until the remote answers with the Final bit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember where transmission should resume */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We told the remote we were busy (RNR); poll it
			 * with RR so it resumes transmission.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote and wait for F */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2784
2785static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2786 struct l2cap_ctrl *control,
2787 struct sk_buff_head *skbs, u8 event)
2788{
2789 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2790 event);
2791
2792 switch (event) {
2793 case L2CAP_EV_DATA_REQUEST:
2794 if (chan->tx_send_head == NULL)
2795 chan->tx_send_head = skb_peek(skbs);
2796 /* Queue data, but don't send. */
2797 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2798 break;
2799 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2800 BT_DBG("Enter LOCAL_BUSY");
2801 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2802
2803 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2804 /* The SREJ_SENT state must be aborted if we are to
2805 * enter the LOCAL_BUSY state.
2806 */
2807 l2cap_abort_rx_srej_sent(chan);
2808 }
2809
2810 l2cap_send_ack(chan);
2811
2812 break;
2813 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2814 BT_DBG("Exit LOCAL_BUSY");
2815 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2816
2817 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2818 struct l2cap_ctrl local_control;
2819 memset(&local_control, 0, sizeof(local_control));
2820 local_control.sframe = 1;
2821 local_control.super = L2CAP_SUPER_RR;
2822 local_control.poll = 1;
2823 local_control.reqseq = chan->buffer_seq;
2824 l2cap_send_sframe(chan, &local_control);
2825
2826 chan->retry_count = 1;
2827 __set_monitor_timer(chan);
2828 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2829 }
2830 break;
2831 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2832 l2cap_process_reqseq(chan, control->reqseq);
2833
2834 /* Fall through */
2835
2836 case L2CAP_EV_RECV_FBIT:
2837 if (control && control->final) {
2838 __clear_monitor_timer(chan);
2839 if (chan->unacked_frames > 0)
2840 __set_retrans_timer(chan);
2841 chan->retry_count = 0;
2842 chan->tx_state = L2CAP_TX_STATE_XMIT;
2843 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2844 }
2845 break;
2846 case L2CAP_EV_EXPLICIT_POLL:
2847 /* Ignore */
2848 break;
2849 case L2CAP_EV_MONITOR_TO:
2850 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2851 l2cap_send_rr_or_rnr(chan, 1);
2852 __set_monitor_timer(chan);
2853 chan->retry_count++;
2854 } else {
2855 l2cap_send_disconn_req(chan, ECONNABORTED);
2856 }
2857 break;
2858 default:
2859 break;
2860 }
2861}
2862
2863static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2864 struct sk_buff_head *skbs, u8 event)
2865{
2866 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2867 chan, control, skbs, event, chan->tx_state);
2868
2869 switch (chan->tx_state) {
2870 case L2CAP_TX_STATE_XMIT:
2871 l2cap_tx_state_xmit(chan, control, skbs, event);
2872 break;
2873 case L2CAP_TX_STATE_WAIT_F:
2874 l2cap_tx_state_wait_f(chan, control, skbs, event);
2875 break;
2876 default:
2877 /* Ignore event */
2878 break;
2879 }
2880}
2881
/* Feed a received frame's ReqSeq (and F bit) into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2888
/* Feed only a received frame's F bit into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2895
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Clone failure is non-fatal: just skip this channel */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() takes ownership on success; free on refusal */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2923
2924/* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (header + command header + payload).
 *
 * If the payload does not fit in a single skb of conn->mtu bytes, the
 * remainder is attached as continuation fragments on the frag_list;
 * only the first skb carries the L2CAP and command headers.
 *
 * Returns the skb or NULL on allocation failure / undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least hold the two headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy as much payload as fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
2990
/* Extract one configuration option from a request/response buffer.
 *
 * Advances *ptr past the option and returns the total consumed length
 * (option header plus payload).  Payloads of 1, 2 or 4 bytes are
 * returned by value in *val; any other length returns a pointer to the
 * raw payload bytes in *val.
 *
 * NOTE(review): opt->len comes from the wire unvalidated here; callers
 * are responsible for checking the remaining buffer length and *olen
 * before dereferencing *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3024
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * @size is the space remaining in the output buffer; an option that
 * would overflow it is silently dropped (the response is simply built
 * without it) rather than written out of bounds.
 *
 * Values of length 1, 2 or 4 are encoded from @val directly; any other
 * length treats @val as a pointer to @len raw bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Bounds check: refuse to write past the end of the buffer */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3057
/* Append an Extended Flow Specification (EFS) option describing the
 * local side of the channel.  Only ERTM and streaming modes carry an
 * EFS; other modes emit nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort parameters */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3088
/* Ack timer work: if any received frames are still unacknowledged,
 * send an RR (or RNR when locally busy) to acknowledge them.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the reference presumably taken when the timer was
	 * armed — confirm against __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
3108
/* Initialize per-channel transmit/receive state for ERTM or streaming
 * mode.  Sequence counters and queues are reset for all modes; the
 * timers, SREJ queue and sequence lists are only set up for ERTM.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation (in which case any list already allocated is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP move state starts stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3153
3154static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3155{
3156 switch (mode) {
3157 case L2CAP_MODE_STREAMING:
3158 case L2CAP_MODE_ERTM:
3159 if (l2cap_mode_supported(mode, remote_feat_mask))
3160 return mode;
3161 /* fall through */
3162 default:
3163 return L2CAP_MODE_BASIC;
3164 }
3165}
3166
3167static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3168{
3169 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3170 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3171}
3172
3173static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3174{
3175 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3176 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3177}
3178
/* Fill in the RFC retransmission and monitor timeouts: derived from the
 * AMP controller's best-effort flush timeout when the channel runs on a
 * high-speed link, otherwise the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3216
3217static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3218{
3219 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3220 __l2cap_ews_supported(chan->conn)) {
3221 /* use extended control field */
3222 set_bit(FLAG_EXT_CTRL, &chan->flags);
3223 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3224 } else {
3225 chan->tx_win = min_t(u16, chan->tx_win,
3226 L2CAP_DEFAULT_TX_WINDOW);
3227 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3228 }
3229 chan->ack_win = chan->tx_win;
3230}
3231
/* Derive an automatic incoming MTU from the ACL packet types in use.
 *
 * The checks run from smallest to largest packet so chan->imtu ends up
 * sized for the largest packet available.  NOTE(review): the EDR bits
 * in pkt_type appear to be exclusion flags (bit clear = packet type
 * usable), hence the negated tests — confirm against the HCI spec.
 */
static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}
3274
/* Build a Configure Request for @chan into @data (at most @data_size
 * bytes).  Selects the channel mode on the first request, then emits
 * MTU, RFC, EFS, EWS and FCS options as applicable.
 *
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when differing from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* No RFC option needed if the remote supports neither
		 * ERTM nor streaming mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS is limited by the HCI MTU minus worst-case headers */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Windows beyond the default need the EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3400
/* Parse the remote's Configure Request (buffered in chan->conf_req)
 * and build our Configure Response into @data (at most @data_size
 * bytes).
 *
 * Options of unexpected length are ignored (left at defaults); each
 * parsed option updates local negotiation state.  Returns the response
 * length, or a negative errno when the request must be refused
 * outright.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		/* A wire length larger than the buffer ends parsing */
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		/* Each case validates olen before trusting val */
		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* EWS requires A2MP support on our side */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown non-hint
			 * options are echoed back as UNKNOWN.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be (re)selected on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* An EWS option overrides the RFC window size */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS to what fits our HCI MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3625
3626static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3627 void *data, size_t size, u16 *result)
3628{
3629 struct l2cap_conf_req *req = data;
3630 void *ptr = req->data;
3631 void *endptr = data + size;
3632 int type, olen;
3633 unsigned long val;
3634 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3635 struct l2cap_conf_efs efs;
3636
3637 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3638
3639 while (len >= L2CAP_CONF_OPT_SIZE) {
3640 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3641 if (len < 0)
3642 break;
3643
3644 switch (type) {
3645 case L2CAP_CONF_MTU:
3646 if (olen != 2)
3647 break;
3648 if (val < L2CAP_DEFAULT_MIN_MTU) {
3649 *result = L2CAP_CONF_UNACCEPT;
3650 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3651 } else
3652 chan->imtu = val;
3653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3654 endptr - ptr);
3655 break;
3656
3657 case L2CAP_CONF_FLUSH_TO:
3658 if (olen != 2)
3659 break;
3660 chan->flush_to = val;
3661 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3662 chan->flush_to, endptr - ptr);
3663 break;
3664
3665 case L2CAP_CONF_RFC:
3666 if (olen != sizeof(rfc))
3667 break;
3668 memcpy(&rfc, (void *)val, olen);
3669 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3670 rfc.mode != chan->mode)
3671 return -ECONNREFUSED;
3672 chan->fcs = 0;
3673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3674 (unsigned long) &rfc, endptr - ptr);
3675 break;
3676
3677 case L2CAP_CONF_EWS:
3678 if (olen != 2)
3679 break;
3680 chan->ack_win = min_t(u16, val, chan->ack_win);
3681 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3682 chan->tx_win, endptr - ptr);
3683 break;
3684
3685 case L2CAP_CONF_EFS:
3686 if (olen != sizeof(efs))
3687 break;
3688 memcpy(&efs, (void *)val, olen);
3689 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3690 efs.stype != L2CAP_SERV_NOTRAFIC &&
3691 efs.stype != chan->local_stype)
3692 return -ECONNREFUSED;
3693 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3694 (unsigned long) &efs, endptr - ptr);
3695 break;
3696
3697 case L2CAP_CONF_FCS:
3698 if (olen != 1)
3699 break;
3700 if (*result == L2CAP_CONF_PENDING)
3701 if (val == L2CAP_FCS_NONE)
3702 set_bit(CONF_RECV_NO_FCS,
3703 &chan->conf_state);
3704 break;
3705 }
3706 }
3707
3708 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3709 return -ECONNREFUSED;
3710
3711 chan->mode = rfc.mode;
3712
3713 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3714 switch (rfc.mode) {
3715 case L2CAP_MODE_ERTM:
3716 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3717 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3718 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3719 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3720 chan->ack_win = min_t(u16, chan->ack_win,
3721 rfc.txwin_size);
3722
3723 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3724 chan->local_msdu = le16_to_cpu(efs.msdu);
3725 chan->local_sdu_itime =
3726 le32_to_cpu(efs.sdu_itime);
3727 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3728 chan->local_flush_to =
3729 le32_to_cpu(efs.flush_to);
3730 }
3731 break;
3732
3733 case L2CAP_MODE_STREAMING:
3734 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3735 }
3736 }
3737
3738 req->dcid = cpu_to_le16(chan->dcid);
3739 req->flags = cpu_to_le16(0);
3740
3741 return ptr - data;
3742}
3743
3744static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3745 u16 result, u16 flags)
3746{
3747 struct l2cap_conf_rsp *rsp = data;
3748 void *ptr = rsp->data;
3749
3750 BT_DBG("chan %p", chan);
3751
3752 rsp->scid = cpu_to_le16(chan->dcid);
3753 rsp->result = cpu_to_le16(result);
3754 rsp->flags = cpu_to_le16(flags);
3755
3756 return ptr - data;
3757}
3758
3759void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3760{
3761 struct l2cap_le_conn_rsp rsp;
3762 struct l2cap_conn *conn = chan->conn;
3763
3764 BT_DBG("chan %p", chan);
3765
3766 rsp.dcid = cpu_to_le16(chan->scid);
3767 rsp.mtu = cpu_to_le16(chan->imtu);
3768 rsp.mps = cpu_to_le16(chan->mps);
3769 rsp.credits = cpu_to_le16(chan->rx_credits);
3770 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3771
3772 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3773 &rsp);
3774}
3775
3776void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3777{
3778 struct l2cap_conn_rsp rsp;
3779 struct l2cap_conn *conn = chan->conn;
3780 u8 buf[128];
3781 u8 rsp_code;
3782
3783 rsp.scid = cpu_to_le16(chan->dcid);
3784 rsp.dcid = cpu_to_le16(chan->scid);
3785 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3786 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3787
3788 if (chan->hs_hcon)
3789 rsp_code = L2CAP_CREATE_CHAN_RSP;
3790 else
3791 rsp_code = L2CAP_CONN_RSP;
3792
3793 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3794
3795 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3796
3797 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3798 return;
3799
3800 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3801 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3802 chan->num_conf_req++;
3803}
3804
/* Extract the negotiated RFC (and extended window size) options from a
 * successful Configure Response and apply them to the channel.  Only
 * ERTM and streaming modes carry these parameters.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Ignore options with an unexpected length */
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Never let the ack window grow past what was negotiated */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3860
3861static inline int l2cap_command_rej(struct l2cap_conn *conn,
3862 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3863 u8 *data)
3864{
3865 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3866
3867 if (cmd_len < sizeof(*rej))
3868 return -EPROTO;
3869
3870 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3871 return 0;
3872
3873 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3874 cmd->ident == conn->info_ident) {
3875 cancel_delayed_work(&conn->info_timer);
3876
3877 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3878 conn->info_ident = 0;
3879
3880 l2cap_conn_start(conn);
3881 }
3882
3883 return 0;
3884}
3885
/* Handle an incoming Connection Request (also reused for AMP Create
 * Channel Requests via @rsp_code/@amp_id).  Looks up a listening channel
 * for the requested PSM, validates link security and the remote source
 * CID, creates the child channel and always sends a response.  Returns
 * the new channel (or NULL) so AMP code can continue setup.
 *
 * Locking: takes conn->chan_lock and the parent channel lock for the
 * duration of channel creation; the response itself is sent unlocked.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Decide between SUCCESS and PEND: the feature exchange must have
	 * completed and the link must pass the channel's security checks
	 * before a final response may be given.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature exchange if it has not been triggered yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* For an immediately successful connect, begin configuration now */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4028
4029static int l2cap_connect_req(struct l2cap_conn *conn,
4030 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4031{
4032 struct hci_dev *hdev = conn->hcon->hdev;
4033 struct hci_conn *hcon = conn->hcon;
4034
4035 if (cmd_len < sizeof(struct l2cap_conn_req))
4036 return -EPROTO;
4037
4038 hci_dev_lock(hdev);
4039 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4040 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4041 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4042 hci_dev_unlock(hdev);
4043
4044 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4045 return 0;
4046}
4047
/* Handle a Connection Response / Create Channel Response from the remote.
 * On SUCCESS move the channel to BT_CONFIG and send the first Configure
 * Request; on PEND only mark the channel; any other result tears the
 * channel down with ECONNREFUSED.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A normal response echoes our source CID; some rejects carry
	 * scid 0, so fall back to matching the command identifier.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial Configure Request exactly once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4120
4121static inline void set_default_fcs(struct l2cap_chan *chan)
4122{
4123 /* FCS is enabled only in ERTM or streaming mode, if one or both
4124 * sides request it.
4125 */
4126 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4127 chan->fcs = L2CAP_FCS_NONE;
4128 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4129 chan->fcs = L2CAP_FCS_CRC16;
4130}
4131
4132static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4133 u8 ident, u16 flags)
4134{
4135 struct l2cap_conn *conn = chan->conn;
4136
4137 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4138 flags);
4139
4140 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4141 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4142
4143 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4144 l2cap_build_conf_rsp(chan, data,
4145 L2CAP_CONF_SUCCESS, flags), data);
4146}
4147
4148static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4149 u16 scid, u16 dcid)
4150{
4151 struct l2cap_cmd_rej_cid rej;
4152
4153 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4154 rej.scid = __cpu_to_le16(scid);
4155 rej.dcid = __cpu_to_le16(dcid);
4156
4157 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4158}
4159
/* Handle an incoming Configure Request.  Options may arrive fragmented
 * across several requests (continuation flag); they are accumulated in
 * chan->conf_req until complete, then parsed and answered.  When both
 * directions are configured the channel becomes ready.
 *
 * Note: l2cap_get_chan_by_scid() returns the channel locked; it is
 * released at the "unlock" label.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize FCS/ERTM and go ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not been sent yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4268
/* Handle an incoming Configure Response.  SUCCESS applies the negotiated
 * RFC values; PENDING may trigger an EFS response or logical link setup;
 * UNACCEPT retries with a fresh Configure Request up to the retry limit;
 * everything else disconnects the channel.
 *
 * Note: l2cap_get_chan_by_scid() returns the channel locked; it is
 * released at the "done" label.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers now; AMP waits for the logical
			 * link to come up before responding.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finalize FCS/ERTM and go ready */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4381
/* Handle an incoming Disconnection Request: acknowledge it, then tear
 * down the matching channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Take an extra reference so the channel stays valid across
	 * l2cap_chan_del() until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4428
/* Handle a Disconnection Response: finish tearing down a channel we
 * previously asked to disconnect.  Responses for channels not in
 * BT_DISCONN state are ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Extra reference keeps the channel valid across l2cap_chan_del()
	 * until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4473
4474static inline int l2cap_information_req(struct l2cap_conn *conn,
4475 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4476 u8 *data)
4477{
4478 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4479 u16 type;
4480
4481 if (cmd_len != sizeof(*req))
4482 return -EPROTO;
4483
4484 type = __le16_to_cpu(req->type);
4485
4486 BT_DBG("type 0x%4.4x", type);
4487
4488 if (type == L2CAP_IT_FEAT_MASK) {
4489 u8 buf[8];
4490 u32 feat_mask = l2cap_feat_mask;
4491 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4492 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4493 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4494 if (!disable_ertm)
4495 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4496 | L2CAP_FEAT_FCS;
4497 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4498 feat_mask |= L2CAP_FEAT_EXT_FLOW
4499 | L2CAP_FEAT_EXT_WINDOW;
4500
4501 put_unaligned_le32(feat_mask, rsp->data);
4502 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4503 buf);
4504 } else if (type == L2CAP_IT_FIXED_CHAN) {
4505 u8 buf[12];
4506 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4507
4508 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4509 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4510 rsp->data[0] = conn->local_fixed_chan;
4511 memset(rsp->data + 1, 0, 7);
4512 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4513 buf);
4514 } else {
4515 struct l2cap_info_rsp rsp;
4516 rsp.type = cpu_to_le16(type);
4517 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4518 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4519 &rsp);
4520 }
4521
4522 return 0;
4523}
4524
/* Handle an Information Response during the feature exchange.  After the
 * feature mask arrives, optionally follow up with a fixed-channel query;
 * once the exchange completes, pending channels may proceed.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* A failed query still completes the exchange */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Fixed channels supported: ask which ones before
		 * declaring the exchange done.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4587
/* Handle an AMP Create Channel Request.  Controller id 0 degrades to a
 * plain BR/EDR connect; otherwise the AMP controller id is validated and
 * the new channel is bound to the high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The physical AMP link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4664
4665static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4666{
4667 struct l2cap_move_chan_req req;
4668 u8 ident;
4669
4670 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4671
4672 ident = l2cap_get_ident(chan->conn);
4673 chan->ident = ident;
4674
4675 req.icid = cpu_to_le16(chan->scid);
4676 req.dest_amp_id = dest_amp_id;
4677
4678 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4679 &req);
4680
4681 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4682}
4683
4684static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4685{
4686 struct l2cap_move_chan_rsp rsp;
4687
4688 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4689
4690 rsp.icid = cpu_to_le16(chan->dcid);
4691 rsp.result = cpu_to_le16(result);
4692
4693 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4694 sizeof(rsp), &rsp);
4695}
4696
4697static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4698{
4699 struct l2cap_move_chan_cfm cfm;
4700
4701 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4702
4703 chan->ident = l2cap_get_ident(chan->conn);
4704
4705 cfm.icid = cpu_to_le16(chan->scid);
4706 cfm.result = cpu_to_le16(result);
4707
4708 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4709 sizeof(cfm), &cfm);
4710
4711 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4712}
4713
4714static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4715{
4716 struct l2cap_move_chan_cfm cfm;
4717
4718 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4719
4720 cfm.icid = cpu_to_le16(icid);
4721 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4722
4723 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4724 sizeof(cfm), &cfm);
4725}
4726
4727static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4728 u16 icid)
4729{
4730 struct l2cap_move_chan_cfm_rsp rsp;
4731
4732 BT_DBG("icid 0x%4.4x", icid);
4733
4734 rsp.icid = cpu_to_le16(icid);
4735 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4736}
4737
4738static void __release_logical_link(struct l2cap_chan *chan)
4739{
4740 chan->hs_hchan = NULL;
4741 chan->hs_hcon = NULL;
4742
4743 /* Placeholder - release the logical link */
4744}
4745
4746static void l2cap_logical_fail(struct l2cap_chan *chan)
4747{
4748 /* Logical link setup failed */
4749 if (chan->state != BT_CONNECTED) {
4750 /* Create channel failure, disconnect */
4751 l2cap_send_disconn_req(chan, ECONNRESET);
4752 return;
4753 }
4754
4755 switch (chan->move_role) {
4756 case L2CAP_MOVE_ROLE_RESPONDER:
4757 l2cap_move_done(chan);
4758 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4759 break;
4760 case L2CAP_MOVE_ROLE_INITIATOR:
4761 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4762 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4763 /* Remote has only sent pending or
4764 * success responses, clean up
4765 */
4766 l2cap_move_done(chan);
4767 }
4768
4769 /* Other amp move states imply that the move
4770 * has already aborted
4771 */
4772 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4773 break;
4774 }
4775}
4776
/* Finish creating an AMP channel once its logical link came up:
 * attach the hci_chan, send the config response that was deferred
 * pending the link, and if our side of configuration is already done
 * bring the channel to the ready state.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Config response was held back until the logical link existed */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			/* ERTM init failed: tear the channel down */
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4799
/* Advance a channel move after its logical link on the target AMP
 * controller has come up.  What happens next depends on where the
 * move state machine currently is.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Link is up and peer already answered: confirm (as
		 * initiator) or report success (as responder), unless
		 * the local side is busy and must drain first.
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4833
/* Logical link (AMP) setup completion callback.
 *
 * On failure, unwind via l2cap_logical_fail() and drop the link
 * pointers.  On success, dispatch either to the channel-creation or
 * the channel-move completion path based on channel state.
 *
 * Call with chan locked
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4854
4855void l2cap_move_start(struct l2cap_chan *chan)
4856{
4857 BT_DBG("chan %p", chan);
4858
4859 if (chan->local_amp_id == AMP_ID_BREDR) {
4860 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4861 return;
4862 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4863 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4864 /* Placeholder - start physical link setup */
4865 } else {
4866 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4867 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4868 chan->move_id = 0;
4869 l2cap_move_setup(chan);
4870 l2cap_send_move_chan_req(chan, 0);
4871 }
4872}
4873
4874static void l2cap_do_create(struct l2cap_chan *chan, int result,
4875 u8 local_amp_id, u8 remote_amp_id)
4876{
4877 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4878 local_amp_id, remote_amp_id);
4879
4880 chan->fcs = L2CAP_FCS_NONE;
4881
4882 /* Outgoing channel on AMP */
4883 if (chan->state == BT_CONNECT) {
4884 if (result == L2CAP_CR_SUCCESS) {
4885 chan->local_amp_id = local_amp_id;
4886 l2cap_send_create_chan_req(chan, remote_amp_id);
4887 } else {
4888 /* Revert to BR/EDR connect */
4889 l2cap_send_conn_req(chan);
4890 }
4891
4892 return;
4893 }
4894
4895 /* Incoming channel on AMP */
4896 if (__l2cap_no_conn_pending(chan)) {
4897 struct l2cap_conn_rsp rsp;
4898 char buf[128];
4899 rsp.scid = cpu_to_le16(chan->dcid);
4900 rsp.dcid = cpu_to_le16(chan->scid);
4901
4902 if (result == L2CAP_CR_SUCCESS) {
4903 /* Send successful response */
4904 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4905 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4906 } else {
4907 /* Send negative response */
4908 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4909 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4910 }
4911
4912 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4913 sizeof(rsp), &rsp);
4914
4915 if (result == L2CAP_CR_SUCCESS) {
4916 l2cap_state_change(chan, BT_CONFIG);
4917 set_bit(CONF_REQ_SENT, &chan->conf_state);
4918 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4919 L2CAP_CONF_REQ,
4920 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4921 chan->num_conf_req++;
4922 }
4923 }
4924}
4925
4926static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4927 u8 remote_amp_id)
4928{
4929 l2cap_move_setup(chan);
4930 chan->move_id = local_amp_id;
4931 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4932
4933 l2cap_send_move_chan_req(chan, remote_amp_id);
4934}
4935
/* As move responder: answer the peer's move request based on logical
 * link availability.
 *
 * NOTE(review): hchan stays NULL until the placeholder below is
 * implemented, so today this always sends L2CAP_MR_NOT_ALLOWED; the
 * `result` parameter is currently unused.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4960
4961static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4962{
4963 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4964 u8 rsp_result;
4965 if (result == -EINVAL)
4966 rsp_result = L2CAP_MR_BAD_ID;
4967 else
4968 rsp_result = L2CAP_MR_NOT_ALLOWED;
4969
4970 l2cap_send_move_chan_rsp(chan, rsp_result);
4971 }
4972
4973 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4974 chan->move_state = L2CAP_MOVE_STABLE;
4975
4976 /* Restart data transmission */
4977 l2cap_ertm_send(chan);
4978}
4979
/* Physical (AMP) link setup completion callback.
 *
 * Dispatches to channel creation, move initiation/response, or move
 * cancellation depending on channel state, result and move role.
 *
 * Invoke with locked chan
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away: nothing to continue */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5011
/* Handle an incoming Move Channel Request.
 *
 * Validates the request (channel exists, mode allows moving, target
 * controller exists and is up), detects move collisions, then answers
 * with a Move Channel Response carrying the chosen result code.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; every path
 * after a successful lookup must go through l2cap_chan_unlock().
 * Returns 0 when handled, -EPROTO/-EINVAL for malformed/disallowed
 * commands (caller then sends a command reject).
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves are only valid when A2MP is supported locally */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel: reject by icid directly */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Fixed channels, BR/EDR-only policy, and basic mode cannot move */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Target AMP controller must exist, be an AMP, and be up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5108
/* Continue a channel move after a Move Channel Response with result
 * Success or Pending.  Advances the move state machine; if no channel
 * matches the icid, an "unconfirmed" confirmation is sent by icid as
 * the spec requires.  l2cap_get_chan_by_scid() returns the channel
 * locked; it is unlocked at the end.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Pending responses extend the wait (ERTX timeout) */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5198
/* Handle a failed Move Channel Response (any result other than
 * Success/Pending).  On collision the initiator demotes itself to
 * responder; otherwise the move is cancelled.  In all cases an
 * "unconfirmed" confirmation is sent.  l2cap_get_chan_by_ident()
 * returns the channel locked.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5227
5228static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5229 struct l2cap_cmd_hdr *cmd,
5230 u16 cmd_len, void *data)
5231{
5232 struct l2cap_move_chan_rsp *rsp = data;
5233 u16 icid, result;
5234
5235 if (cmd_len != sizeof(*rsp))
5236 return -EPROTO;
5237
5238 icid = le16_to_cpu(rsp->icid);
5239 result = le16_to_cpu(rsp->result);
5240
5241 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5242
5243 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5244 l2cap_move_continue(conn, icid, result);
5245 else
5246 l2cap_move_fail(conn, cmd->ident, icid, result);
5247
5248 return 0;
5249}
5250
/* Handle a Move Channel Confirmation.  Commits or reverts the
 * controller switch on our side and always answers with a
 * confirmation response (even for an unknown icid, per spec).
 * l2cap_get_chan_by_dcid() returns the channel locked.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit: adopt the new controller; a move back
			 * to BR/EDR releases the old logical link.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: revert to the old controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5292
/* Handle a Move Channel Confirmation Response: the final PDU of a
 * move.  Commits the controller switch on the initiator side and
 * finishes the move.  l2cap_get_chan_by_scid() returns the channel
 * locked; an unknown icid is silently ignored.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: the AMP logical link is no
		 * longer needed.
		 */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5327
/* Handle an LE Connection Parameter Update Request (slave-initiated).
 *
 * Only valid when we are the LE master.  Validates the proposed
 * parameters, always answers accepted/rejected, and on acceptance
 * initiates the actual LE connection update and records the new
 * parameters via mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the update and let mgmt decide whether to
		 * store the new parameters.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5377
/* Handle an LE Credit Based Connection Response.
 *
 * On success, adopt the peer's dcid/MTU/MPS/credits and mark the
 * channel ready.  On an authentication/encryption failure, raise the
 * security level and retry via SMP (unless MITM is already in force).
 * Any other result kills the channel.
 *
 * Locking: conn->chan_lock guards the ident lookup; the channel is
 * then individually locked for the state change.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* On success the peer must give sane values: MTU/MPS >= 23 and
	 * a dcid inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already in use on this link */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5464
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Handlers whose return value is propagated can trigger a command
 * reject in the caller; the others are best-effort and their errors
 * are deliberately ignored.  Unknown opcodes return -EINVAL.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5544
/* Handle an LE Credit Based Connection Request.
 *
 * Finds a listening channel for the PSM, checks security and the
 * peer's scid, creates the new channel, initializes LE flow control,
 * and responds.  With FLAG_DEFER_SETUP the decision is deferred to
 * the socket layer and no response is sent here (signaled internally
 * by reusing L2CAP_CR_PEND, which is not an LE CoC result code).
 *
 * Locking: conn->chan_lock plus the parent (listening) channel lock
 * are held while the child channel is created and registered.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE CoC MTU and MPS is 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5670
/* Handle an LE Flow Control Credit packet: add the peer-granted
 * credits to the channel and resume transmission.  A grant that would
 * overflow LE_FLOWCTL_MAX_CREDITS is a protocol violation and the
 * channel is disconnected.  l2cap_get_chan_by_dcid() returns the
 * channel locked.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5716
5717static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5718 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5719 u8 *data)
5720{
5721 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5722 struct l2cap_chan *chan;
5723
5724 if (cmd_len < sizeof(*rej))
5725 return -EPROTO;
5726
5727 mutex_lock(&conn->chan_lock);
5728
5729 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5730 if (!chan)
5731 goto done;
5732
5733 l2cap_chan_lock(chan);
5734 l2cap_chan_del(chan, ECONNREFUSED);
5735 l2cap_chan_unlock(chan);
5736
5737done:
5738 mutex_unlock(&conn->chan_lock);
5739 return 0;
5740}
5741
/* Dispatch one LE signaling command to its handler.  Unknown opcodes
 * return -EINVAL so the caller sends a command reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5788
/* Process one PDU received on the LE signaling channel.  LE carries
 * exactly one command per PDU (unlike BR/EDR).  Malformed PDUs are
 * dropped; handler errors produce a command reject.  Consumes skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Length must match exactly and ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err comes
		 * from command handling, not a link-type check.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5829
/* Process a PDU received on the BR/EDR signaling channel.  A single
 * PDU may contain several concatenated commands; iterate over them,
 * rejecting ones whose handler fails.  Raw-socket listeners get a
 * copy first.  Consumes skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Walk each embedded command: header, then cmd_len payload */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Claimed length must fit and ident 0 is reserved */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text is misleading - err
			 * comes from command handling, not a link-type
			 * check.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5878
5879static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5880{
5881 u16 our_fcs, rcv_fcs;
5882 int hdr_size;
5883
5884 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5885 hdr_size = L2CAP_EXT_HDR_SIZE;
5886 else
5887 hdr_size = L2CAP_ENH_HDR_SIZE;
5888
5889 if (chan->fcs == L2CAP_FCS_CRC16) {
5890 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5891 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5892 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5893
5894 if (our_fcs != rcv_fcs)
5895 return -EBADMSG;
5896 }
5897 return 0;
5898}
5899
/* Send the mandatory F-bit reply to a poll (P-bit) from the peer:
 * an RNR when locally busy, otherwise pending I-frames (which carry
 * the F-bit via CONN_SEND_FBIT), or a plain RR if nothing else
 * transmitted it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Locally busy: answer the poll with RNR */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy and we still await acks: restart the
	 * retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5933
5934static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5935 struct sk_buff **last_frag)
5936{
5937 /* skb->len reflects data in skb as well as all fragments
5938 * skb->data_len reflects only data in fragments
5939 */
5940 if (!skb_has_frag_list(skb))
5941 skb_shinfo(skb)->frag_list = new_frag;
5942
5943 new_frag->next = NULL;
5944
5945 (*last_frag)->next = new_frag;
5946 *last_frag = new_frag;
5947
5948 skb->len += new_frag->len;
5949 skb->data_len += new_frag->len;
5950 skb->truesize += new_frag->truesize;
5951}
5952
/* Reassemble ERTM/streaming I-frames into a complete SDU and deliver it
 * via chan->ops->recv().
 *
 * Ownership: when a partial SDU is accepted the skb is kept in
 * chan->sdu (or appended to its frag list) and must not be freed by the
 * caller; on any error both the skb and any partially assembled SDU are
 * freed here.  Returns 0 on success, -EINVAL on a SAR sequence
 * violation, -EMSGSIZE when the SDU exceeds the incoming MTU, or the
 * recv() callback's error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while a reassembly is in progress
		 * is a protocol violation.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start segment carries a 2-byte total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment that already holds the whole SDU is
		 * invalid; the sender should have used UNSEGMENTED.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb ownership transferred to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end segment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard the frame and any partial SDU; skb is NULL
		 * here whenever ownership was already transferred.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6037
/* Resegmentation of queued transmit data after a channel move is not
 * implemented; report success unconditionally.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
6043
6044void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6045{
6046 u8 event;
6047
6048 if (chan->mode != L2CAP_MODE_ERTM)
6049 return;
6050
6051 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6052 l2cap_tx(chan, NULL, NULL, event);
6053}
6054
/* Drain frames buffered in the SREJ queue, in sequence order, until a
 * gap (still-missing txseq) is found or local busy is entered.  Once
 * the queue is fully drained, return to the RECV state and acknowledge.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled: resume normal reception and ack */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6088
/* Process an SREJ S-frame: retransmit the single frame the peer asked
 * for, observing the P/F-bit rules of the ERTM state machine.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot legitimately request a sequence number we
	 * have not used yet.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* Give up once a frame has hit its retransmission limit */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1 demands a response carrying the F-bit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this F=1 SREJ merely
			 * answers the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6146
/* Process a REJ S-frame: the peer rejects everything from reqseq
 * onward, so retransmit all outstanding frames starting there.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Rejecting the next, as yet unused, sequence number is a
	 * protocol violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Disconnect if the first frame to resend has already hit the
	 * retransmission limit.
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit once per REJ exchange: skip it when
		 * this F=1 frame answers a REJ we already handled.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6183
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected, duplicate, unexpected (gap), invalid, or - while SREJs are
 * outstanding - the SREJ-specific variants.  This is pure
 * classification; no channel state is modified here.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		/* In sequence - but still must fall inside the window */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* Behind the expected sequence number means we already saw it */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6269
/* ERTM receive-state machine, RECV state: handle in- and out-of-
 * sequence I-frames and RR/RNR/REJ/SREJ S-frame events.  The skb is
 * freed on exit unless it was queued or handed up (skb_in_use).
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F=1 answers our poll; if no REJ was
				 * outstanding, resend unacked frames.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info matters */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): control looks always non-NULL on the
		 * current call paths; the extra check appears redundant.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6403
/* ERTM receive-state machine, SREJ_SENT state: frames arriving while
 * selective-reject requests are outstanding are buffered in srej_q and
 * drained in order via l2cap_rx_queued_iframes() once the requested
 * retransmissions come in.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit only if this F=1 does not answer a
			 * REJ we already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6546
6547static int l2cap_finish_move(struct l2cap_chan *chan)
6548{
6549 BT_DBG("chan %p", chan);
6550
6551 chan->rx_state = L2CAP_RX_STATE_RECV;
6552
6553 if (chan->hs_hcon)
6554 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6555 else
6556 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6557
6558 return l2cap_resegment(chan);
6559}
6560
/* RX handler for the WAIT_P state (during a channel move): only a frame
 * with P=1 is acceptable.  Rewind the transmit window to the peer's
 * reqseq, finish the move, answer with the F-bit set, and let the
 * RECV-state handler process the S-frame event itself.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not handled in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Only the S-frame event is forwarded; the skb stays behind */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6598
/* RX handler for the WAIT_F state (during a channel move): only a frame
 * with F=1 is acceptable.  Rewind the transmit window to the peer's
 * reqseq, pick up the new controller's MTU, resegment, and then process
 * the frame through the RECV-state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the controller now carrying the channel */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6636
6637static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6638{
6639 /* Make sure reqseq is for a packet that has been sent but not acked */
6640 u16 unacked;
6641
6642 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6643 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6644}
6645
6646static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6647 struct sk_buff *skb, u8 event)
6648{
6649 int err = 0;
6650
6651 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6652 control, skb, event, chan->rx_state);
6653
6654 if (__valid_reqseq(chan, control->reqseq)) {
6655 switch (chan->rx_state) {
6656 case L2CAP_RX_STATE_RECV:
6657 err = l2cap_rx_state_recv(chan, control, skb, event);
6658 break;
6659 case L2CAP_RX_STATE_SREJ_SENT:
6660 err = l2cap_rx_state_srej_sent(chan, control, skb,
6661 event);
6662 break;
6663 case L2CAP_RX_STATE_WAIT_P:
6664 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6665 break;
6666 case L2CAP_RX_STATE_WAIT_F:
6667 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6668 break;
6669 default:
6670 /* shut it down */
6671 break;
6672 }
6673 } else {
6674 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6675 control->reqseq, chan->next_tx_seq,
6676 chan->expected_ack_seq);
6677 l2cap_send_disconn_req(chan, ECONNRESET);
6678 }
6679
6680 return err;
6681}
6682
/* Receive path for streaming mode: in-sequence frames are reassembled;
 * anything else simply resynchronizes - the partial SDU and the frame
 * are dropped and reception continues from the received txseq, since
 * streaming mode has no retransmissions.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* On error the sdu and skb are freed inside */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: abandon the partial SDU and frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just seen */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
6718
/* Entry point for ERTM and streaming-mode PDUs on a connected channel.
 * Unpacks the control field, validates FCS and payload length, then
 * routes I-frames and S-frames into the appropriate receive state
 * machine.  Always returns 0; invalid frames are freed (and some
 * trigger a disconnect).
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length prefix and FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* chan->data is presumably the owning socket here; sk_filter()
	 * drops frames the attached socket filter rejects.
	 */
	if ((chan->mode == L2CAP_MODE_ERTM ||
	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
		goto drop;

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit super field to a state-machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6810
/* Top up the peer's LE flow-control credits so it can keep sending.
 * The target number of outstanding credits is (imtu / mps) + 1; send
 * the peer the difference from what it currently holds.
 *
 * NOTE(review): this assumes chan->rx_credits never exceeds the target,
 * otherwise the u16 subtraction would wrap and grant a huge credit
 * count - confirm against the credit accounting in the callers.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;

	if (!return_credits)
		return;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	/* Track the grant locally before telling the peer */
	chan->rx_credits += return_credits;

	pkt.cid = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6833
6834static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6835{
6836 int err;
6837
6838 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6839
6840 /* Wait recv to confirm reception before updating the credits */
6841 err = chan->ops->recv(chan, skb);
6842
6843 /* Update credits whenever an SDU is received */
6844 l2cap_chan_le_send_credits(chan);
6845
6846 return err;
6847}
6848
/* Receive one LE flow-control (credit-based) PDU.  Consumes a credit,
 * validates the SDU length bookkeeping, and reassembles multi-PDU SDUs
 * in chan->sdu.
 *
 * Ownership: after internal processing begins, the skb and any partial
 * SDU are freed here on error and 0 is returned so the caller does not
 * double-free; a negative return means the caller still owns the skb.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Each PDU costs the sender one credit; receiving with none
	 * left is a protocol violation.
	 */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries a 2-byte total length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU in one PDU: deliver it immediately */
		if (skb->len == sdu_len)
			return l2cap_le_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_le_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6952
/* Route a PDU on a dynamic (connection-oriented) CID to its channel and
 * feed it into the mode-specific receive path.  The skb is freed here
 * unless the channel's recv path consumed it.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the channel
 * already locked - only the A2MP path takes the lock explicitly, and
 * both paths unlock at "done" - confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* A2MP channels are created on first use */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* Non-zero return means recv did not consume the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
7026
/* Deliver a connectionless (G-frame) PDU to the global channel bound to
 * this PSM, tagging the skb with the remote address and PSM so they can
 * be reported through msg_name.  The skb is freed unless the channel's
 * recv() consumed it; the channel reference is always dropped.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless traffic is BR/EDR only */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7063
/* Demultiplex one complete L2CAP PDU to the signaling, connectionless,
 * LE signaling, or data-channel handler.  Frames arriving before the
 * HCI link is fully up are parked on conn->pending_rx and replayed by
 * process_pending_rx().
 *
 * NOTE(review): lh is dereferenced after skb_pull() advances skb->data,
 * relying on the header bytes still being in the buffer, and there is
 * no explicit check that skb->len >= L2CAP_HDR_SIZE here - presumably
 * the ACL reassembly path guarantees a complete header; confirm.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header's length field must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7118
7119static void process_pending_rx(struct work_struct *work)
7120{
7121 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7122 pending_rx_work);
7123 struct sk_buff *skb;
7124
7125 BT_DBG("");
7126
7127 while ((skb = skb_dequeue(&conn->pending_rx)))
7128 l2cap_recv_frame(conn, skb);
7129}
7130
/* Look up or create the L2CAP connection state for an HCI connection.
 *
 * Returns the existing conn if one is already attached to @hcon;
 * otherwise allocates a new one together with its HCI channel, selects
 * the MTU (dedicated LE MTU when the controller reports one, ACL MTU
 * otherwise), records the locally supported fixed channels and
 * initializes locks, lists and deferred work.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* conn keeps its own reference on the hci_conn */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		/* LE controllers without dedicated LE buffers share the
		 * ACL MTU, as do BR/EDR links.
		 */
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	/* Fixed channels that are always supported locally */
	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* BR/EDR SMP fixed channel needs LE enabled plus either Secure
	 * Connections support or the debug force flag.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	/* Default reason reported if the link dies with no better cause */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7197
7198static bool is_valid_psm(u16 psm, u8 dst_type) {
7199 if (!psm)
7200 return false;
7201
7202 if (bdaddr_type_is_le(dst_type))
7203 return (psm <= 0x00ff);
7204
7205 /* PSM must be odd and lsb of upper byte must be 0 */
7206 return ((psm & 0x0101) == 0x0001);
7207}
7208
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * @psm/@cid select a connection-oriented PSM or a fixed-channel CID on
 * destination @dst with address type @dst_type.  Validates PSM, channel
 * mode and state, creates (or reuses) the underlying ACL or LE HCI
 * connection, attaches the channel to it and starts the connect timer.
 * Returns 0 on success (including "already connecting") or a negative
 * errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels are exempt from the PSM/CID validity rules */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ...and fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM and streaming are only allowed unless disabled by the
	 * disable_ertm module parameter.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly as slave; otherwise
		 * use the scan-based connection establishment.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Lock order: conn->chan_lock, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Refuse a destination CID that is already in use on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* The link may already be up (reused hcon): finish immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7370
7371/* ---- L2CAP interface with lower layer (HCI) ---- */
7372
7373int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7374{
7375 int exact = 0, lm1 = 0, lm2 = 0;
7376 struct l2cap_chan *c;
7377
7378 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7379
7380 /* Find listening sockets and check their link_mode */
7381 read_lock(&chan_list_lock);
7382 list_for_each_entry(c, &chan_list, global_l) {
7383 if (c->state != BT_LISTEN)
7384 continue;
7385
7386 if (!bacmp(&c->src, &hdev->bdaddr)) {
7387 lm1 |= HCI_LM_ACCEPT;
7388 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7389 lm1 |= HCI_LM_MASTER;
7390 exact++;
7391 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7392 lm2 |= HCI_LM_ACCEPT;
7393 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7394 lm2 |= HCI_LM_MASTER;
7395 }
7396 }
7397 read_unlock(&chan_list_lock);
7398
7399 return exact ? lm1 : lm2;
7400}
7401
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel is held via l2cap_chan_hold(); the caller must
 * release it with l2cap_chan_put().  Returns NULL when the end of the
 * list is reached.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume just after @c, or start at the head of the list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the link's address or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference before dropping the list lock */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7437
/* HCI connect-complete callback for ACL and LE links.
 *
 * On failure the L2CAP connection state is torn down.  On success the
 * connection state is created (if needed), every listening fixed
 * channel is offered the chance to instantiate itself on the new link,
 * and the connection is marked ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7498
7499int l2cap_disconn_ind(struct hci_conn *hcon)
7500{
7501 struct l2cap_conn *conn = hcon->l2cap_data;
7502
7503 BT_DBG("hcon %p", hcon);
7504
7505 if (!conn)
7506 return HCI_ERROR_REMOTE_USER_TERM;
7507 return conn->disc_reason;
7508}
7509
7510static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7511{
7512 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7513 return;
7514
7515 BT_DBG("hcon %p reason %d", hcon, reason);
7516
7517 l2cap_conn_del(hcon, bt_to_errno(reason));
7518}
7519
7520static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7521{
7522 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7523 return;
7524
7525 if (encrypt == 0x00) {
7526 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7527 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7528 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7529 chan->sec_level == BT_SECURITY_FIPS)
7530 l2cap_chan_close(chan, ECONNREFUSED);
7531 } else {
7532 if (chan->sec_level == BT_SECURITY_MEDIUM)
7533 __clear_chan_timer(chan);
7534 }
7535}
7536
/* HCI security (authentication/encryption) change callback.
 *
 * Walks every channel on the connection and advances its state machine
 * according to the new security status: resumes channels that were
 * waiting on security, kicks off or aborts pending outgoing connects,
 * and answers incoming connect requests that were deferred pending
 * security.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP fixed channel is exempt from link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security succeeded: adopt the link's security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security: only
			 * proceed on success with a sufficiently long
			 * encryption key, otherwise schedule disconnect.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			/* Incoming connect deferred for security: send the
			 * pending Connect Response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, start configuration right away */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7627
/* Entry point for ACL data from the HCI core.
 *
 * Performs L2CAP-level reassembly: a start fragment carries the Basic
 * L2CAP header with the total frame length; continuation fragments are
 * appended to conn->rx_skb until conn->rx_len drops to zero, at which
 * point the completed frame is handed to l2cap_recv_frame().  Malformed
 * fragment sequences mark the connection unreliable and are discarded.
 * Consumes @skb on all paths.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Bytes still missing before the frame is complete */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a start fragment in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7731
/* Callbacks registered with the HCI core: connection lifecycle and
 * security events for ACL/LE links are routed through these.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7738
7739static int l2cap_debugfs_show(struct seq_file *f, void *p)
7740{
7741 struct l2cap_chan *c;
7742
7743 read_lock(&chan_list_lock);
7744
7745 list_for_each_entry(c, &chan_list, global_l) {
7746 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7747 &c->src, c->src_type, &c->dst, c->dst_type,
7748 c->state, __le16_to_cpu(c->psm),
7749 c->scid, c->dcid, c->imtu, c->omtu,
7750 c->sec_level, c->mode);
7751 }
7752
7753 read_unlock(&chan_list_lock);
7754
7755 return 0;
7756}
7757
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7761
7762int __init l2cap_init(void)
7763{
7764 int err;
7765
7766 err = l2cap_init_sockets();
7767 if (err < 0)
7768 return err;
7769
7770 hci_register_cb(&l2cap_cb);
7771
7772 if (IS_ERR_OR_NULL(bt_debugfs))
7773 return 0;
7774
7775 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7776 NULL, &l2cap_debugfs_fops);
7777
7778 return 0;
7779}
7780
/* Module exit: tear down in the reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7787
/* Expose disable_ertm as a writable module parameter (0644) so ERTM and
 * streaming modes can be switched off at load time or runtime.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");