2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
50 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
| L2CAP_FC_CONNLESS
, };
52 static LIST_HEAD(chan_list
);
53 static DEFINE_RWLOCK(chan_list_lock
);
55 static u16 le_max_credits
= L2CAP_LE_MAX_CREDITS
;
56 static u16 le_default_mps
= L2CAP_LE_DEFAULT_MPS
;
58 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
59 u8 code
, u8 ident
, u16 dlen
, void *data
);
60 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
62 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
63 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
65 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
66 struct sk_buff_head
*skbs
, u8 event
);
68 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
70 if (hcon
->type
== LE_LINK
) {
71 if (type
== ADDR_LE_DEV_PUBLIC
)
72 return BDADDR_LE_PUBLIC
;
74 return BDADDR_LE_RANDOM
;
80 /* ---- L2CAP channels ---- */
82 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
87 list_for_each_entry(c
, &conn
->chan_l
, list
) {
94 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
99 list_for_each_entry(c
, &conn
->chan_l
, list
) {
106 /* Find channel with given SCID.
107 * Returns locked channel. */
108 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
111 struct l2cap_chan
*c
;
113 mutex_lock(&conn
->chan_lock
);
114 c
= __l2cap_get_chan_by_scid(conn
, cid
);
117 mutex_unlock(&conn
->chan_lock
);
122 /* Find channel with given DCID.
123 * Returns locked channel.
125 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
128 struct l2cap_chan
*c
;
130 mutex_lock(&conn
->chan_lock
);
131 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
134 mutex_unlock(&conn
->chan_lock
);
139 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
142 struct l2cap_chan
*c
;
144 list_for_each_entry(c
, &conn
->chan_l
, list
) {
145 if (c
->ident
== ident
)
151 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
154 struct l2cap_chan
*c
;
156 mutex_lock(&conn
->chan_lock
);
157 c
= __l2cap_get_chan_by_ident(conn
, ident
);
160 mutex_unlock(&conn
->chan_lock
);
165 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
167 struct l2cap_chan
*c
;
169 list_for_each_entry(c
, &chan_list
, global_l
) {
170 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
176 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
180 write_lock(&chan_list_lock
);
182 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
195 for (p
= 0x1001; p
< 0x1100; p
+= 2)
196 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
197 chan
->psm
= cpu_to_le16(p
);
198 chan
->sport
= cpu_to_le16(p
);
205 write_unlock(&chan_list_lock
);
209 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
211 write_lock(&chan_list_lock
);
215 write_unlock(&chan_list_lock
);
220 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
224 if (conn
->hcon
->type
== LE_LINK
)
225 dyn_end
= L2CAP_CID_LE_DYN_END
;
227 dyn_end
= L2CAP_CID_DYN_END
;
229 for (cid
= L2CAP_CID_DYN_START
; cid
< dyn_end
; cid
++) {
230 if (!__l2cap_get_chan_by_scid(conn
, cid
))
237 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
239 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
240 state_to_string(state
));
243 chan
->ops
->state_change(chan
, state
, 0);
246 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
250 chan
->ops
->state_change(chan
, chan
->state
, err
);
253 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
255 chan
->ops
->state_change(chan
, chan
->state
, err
);
258 static void __set_retrans_timer(struct l2cap_chan
*chan
)
260 if (!delayed_work_pending(&chan
->monitor_timer
) &&
261 chan
->retrans_timeout
) {
262 l2cap_set_timer(chan
, &chan
->retrans_timer
,
263 msecs_to_jiffies(chan
->retrans_timeout
));
267 static void __set_monitor_timer(struct l2cap_chan
*chan
)
269 __clear_retrans_timer(chan
);
270 if (chan
->monitor_timeout
) {
271 l2cap_set_timer(chan
, &chan
->monitor_timer
,
272 msecs_to_jiffies(chan
->monitor_timeout
));
276 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
281 skb_queue_walk(head
, skb
) {
282 if (bt_cb(skb
)->control
.txseq
== seq
)
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
300 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
302 size_t alloc_size
, i
;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size
= roundup_pow_of_two(size
);
310 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
314 seq_list
->mask
= alloc_size
- 1;
315 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
316 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
317 for (i
= 0; i
< alloc_size
; i
++)
318 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
325 kfree(seq_list
->list
);
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
331 /* Constant-time check for list membership */
332 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
335 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
337 u16 seq
= seq_list
->head
;
338 u16 mask
= seq_list
->mask
;
340 seq_list
->head
= seq_list
->list
[seq
& mask
];
341 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
343 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
344 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
345 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
351 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
355 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
358 for (i
= 0; i
<= seq_list
->mask
; i
++)
359 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
361 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
362 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
365 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
367 u16 mask
= seq_list
->mask
;
369 /* All appends happen in constant time */
371 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
374 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
375 seq_list
->head
= seq
;
377 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
379 seq_list
->tail
= seq
;
380 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
383 static void l2cap_chan_timeout(struct work_struct
*work
)
385 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
387 struct l2cap_conn
*conn
= chan
->conn
;
390 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
392 mutex_lock(&conn
->chan_lock
);
393 l2cap_chan_lock(chan
);
395 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
396 reason
= ECONNREFUSED
;
397 else if (chan
->state
== BT_CONNECT
&&
398 chan
->sec_level
!= BT_SECURITY_SDP
)
399 reason
= ECONNREFUSED
;
403 l2cap_chan_close(chan
, reason
);
405 l2cap_chan_unlock(chan
);
407 chan
->ops
->close(chan
);
408 mutex_unlock(&conn
->chan_lock
);
410 l2cap_chan_put(chan
);
413 struct l2cap_chan
*l2cap_chan_create(void)
415 struct l2cap_chan
*chan
;
417 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
421 mutex_init(&chan
->lock
);
423 write_lock(&chan_list_lock
);
424 list_add(&chan
->global_l
, &chan_list
);
425 write_unlock(&chan_list_lock
);
427 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
429 chan
->state
= BT_OPEN
;
431 kref_init(&chan
->kref
);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
436 BT_DBG("chan %p", chan
);
441 static void l2cap_chan_destroy(struct kref
*kref
)
443 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
445 BT_DBG("chan %p", chan
);
447 write_lock(&chan_list_lock
);
448 list_del(&chan
->global_l
);
449 write_unlock(&chan_list_lock
);
454 void l2cap_chan_hold(struct l2cap_chan
*c
)
456 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
461 void l2cap_chan_put(struct l2cap_chan
*c
)
463 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
465 kref_put(&c
->kref
, l2cap_chan_destroy
);
468 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
470 chan
->fcs
= L2CAP_FCS_CRC16
;
471 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
472 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
473 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
474 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
475 chan
->sec_level
= BT_SECURITY_LOW
;
477 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
480 static void l2cap_le_flowctl_init(struct l2cap_chan
*chan
)
483 chan
->sdu_last_frag
= NULL
;
485 chan
->tx_credits
= 0;
486 chan
->rx_credits
= le_max_credits
;
487 chan
->mps
= min_t(u16
, chan
->imtu
, le_default_mps
);
489 skb_queue_head_init(&chan
->tx_q
);
492 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
494 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
495 __le16_to_cpu(chan
->psm
), chan
->dcid
);
497 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
501 switch (chan
->chan_type
) {
502 case L2CAP_CHAN_CONN_ORIENTED
:
503 /* Alloc CID for connection-oriented socket */
504 chan
->scid
= l2cap_alloc_cid(conn
);
505 if (conn
->hcon
->type
== ACL_LINK
)
506 chan
->omtu
= L2CAP_DEFAULT_MTU
;
509 case L2CAP_CHAN_CONN_LESS
:
510 /* Connectionless socket */
511 chan
->scid
= L2CAP_CID_CONN_LESS
;
512 chan
->dcid
= L2CAP_CID_CONN_LESS
;
513 chan
->omtu
= L2CAP_DEFAULT_MTU
;
516 case L2CAP_CHAN_FIXED
:
517 /* Caller will set CID and CID specific MTU values */
521 /* Raw socket can send/recv signalling messages only */
522 chan
->scid
= L2CAP_CID_SIGNALING
;
523 chan
->dcid
= L2CAP_CID_SIGNALING
;
524 chan
->omtu
= L2CAP_DEFAULT_MTU
;
527 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
528 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
529 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
530 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
531 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
532 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
534 l2cap_chan_hold(chan
);
536 hci_conn_hold(conn
->hcon
);
538 list_add(&chan
->list
, &conn
->chan_l
);
541 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
543 mutex_lock(&conn
->chan_lock
);
544 __l2cap_chan_add(conn
, chan
);
545 mutex_unlock(&conn
->chan_lock
);
548 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
550 struct l2cap_conn
*conn
= chan
->conn
;
552 __clear_chan_timer(chan
);
554 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
557 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
558 /* Delete from channel list */
559 list_del(&chan
->list
);
561 l2cap_chan_put(chan
);
565 if (chan
->scid
!= L2CAP_CID_A2MP
)
566 hci_conn_drop(conn
->hcon
);
568 if (mgr
&& mgr
->bredr_chan
== chan
)
569 mgr
->bredr_chan
= NULL
;
572 if (chan
->hs_hchan
) {
573 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
575 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
576 amp_disconnect_logical_link(hs_hchan
);
579 chan
->ops
->teardown(chan
, err
);
581 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
585 case L2CAP_MODE_BASIC
:
588 case L2CAP_MODE_LE_FLOWCTL
:
589 skb_queue_purge(&chan
->tx_q
);
592 case L2CAP_MODE_ERTM
:
593 __clear_retrans_timer(chan
);
594 __clear_monitor_timer(chan
);
595 __clear_ack_timer(chan
);
597 skb_queue_purge(&chan
->srej_q
);
599 l2cap_seq_list_free(&chan
->srej_list
);
600 l2cap_seq_list_free(&chan
->retrans_list
);
604 case L2CAP_MODE_STREAMING
:
605 skb_queue_purge(&chan
->tx_q
);
612 void l2cap_conn_update_id_addr(struct hci_conn
*hcon
)
614 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
615 struct l2cap_chan
*chan
;
617 mutex_lock(&conn
->chan_lock
);
619 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
620 l2cap_chan_lock(chan
);
621 bacpy(&chan
->dst
, &hcon
->dst
);
622 chan
->dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
623 l2cap_chan_unlock(chan
);
626 mutex_unlock(&conn
->chan_lock
);
629 static void l2cap_chan_le_connect_reject(struct l2cap_chan
*chan
)
631 struct l2cap_conn
*conn
= chan
->conn
;
632 struct l2cap_le_conn_rsp rsp
;
635 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
636 result
= L2CAP_CR_AUTHORIZATION
;
638 result
= L2CAP_CR_BAD_PSM
;
640 l2cap_state_change(chan
, BT_DISCONN
);
642 rsp
.dcid
= cpu_to_le16(chan
->scid
);
643 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
644 rsp
.mps
= cpu_to_le16(chan
->mps
);
645 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
646 rsp
.result
= cpu_to_le16(result
);
648 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
652 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
654 struct l2cap_conn
*conn
= chan
->conn
;
655 struct l2cap_conn_rsp rsp
;
658 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
659 result
= L2CAP_CR_SEC_BLOCK
;
661 result
= L2CAP_CR_BAD_PSM
;
663 l2cap_state_change(chan
, BT_DISCONN
);
665 rsp
.scid
= cpu_to_le16(chan
->dcid
);
666 rsp
.dcid
= cpu_to_le16(chan
->scid
);
667 rsp
.result
= cpu_to_le16(result
);
668 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
670 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
673 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
675 struct l2cap_conn
*conn
= chan
->conn
;
677 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
679 switch (chan
->state
) {
681 chan
->ops
->teardown(chan
, 0);
686 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
687 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
688 l2cap_send_disconn_req(chan
, reason
);
690 l2cap_chan_del(chan
, reason
);
694 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
695 if (conn
->hcon
->type
== ACL_LINK
)
696 l2cap_chan_connect_reject(chan
);
697 else if (conn
->hcon
->type
== LE_LINK
)
698 l2cap_chan_le_connect_reject(chan
);
701 l2cap_chan_del(chan
, reason
);
706 l2cap_chan_del(chan
, reason
);
710 chan
->ops
->teardown(chan
, 0);
715 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
717 switch (chan
->chan_type
) {
719 switch (chan
->sec_level
) {
720 case BT_SECURITY_HIGH
:
721 case BT_SECURITY_FIPS
:
722 return HCI_AT_DEDICATED_BONDING_MITM
;
723 case BT_SECURITY_MEDIUM
:
724 return HCI_AT_DEDICATED_BONDING
;
726 return HCI_AT_NO_BONDING
;
729 case L2CAP_CHAN_CONN_LESS
:
730 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_3DSP
)) {
731 if (chan
->sec_level
== BT_SECURITY_LOW
)
732 chan
->sec_level
= BT_SECURITY_SDP
;
734 if (chan
->sec_level
== BT_SECURITY_HIGH
||
735 chan
->sec_level
== BT_SECURITY_FIPS
)
736 return HCI_AT_NO_BONDING_MITM
;
738 return HCI_AT_NO_BONDING
;
740 case L2CAP_CHAN_CONN_ORIENTED
:
741 if (chan
->psm
== cpu_to_le16(L2CAP_PSM_SDP
)) {
742 if (chan
->sec_level
== BT_SECURITY_LOW
)
743 chan
->sec_level
= BT_SECURITY_SDP
;
745 if (chan
->sec_level
== BT_SECURITY_HIGH
||
746 chan
->sec_level
== BT_SECURITY_FIPS
)
747 return HCI_AT_NO_BONDING_MITM
;
749 return HCI_AT_NO_BONDING
;
753 switch (chan
->sec_level
) {
754 case BT_SECURITY_HIGH
:
755 case BT_SECURITY_FIPS
:
756 return HCI_AT_GENERAL_BONDING_MITM
;
757 case BT_SECURITY_MEDIUM
:
758 return HCI_AT_GENERAL_BONDING
;
760 return HCI_AT_NO_BONDING
;
766 /* Service level security */
767 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
769 struct l2cap_conn
*conn
= chan
->conn
;
772 if (conn
->hcon
->type
== LE_LINK
)
773 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
775 auth_type
= l2cap_get_auth_type(chan
);
777 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
780 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
784 /* Get next available identificator.
785 * 1 - 128 are used by kernel.
786 * 129 - 199 are reserved.
787 * 200 - 254 are used by utilities like l2ping, etc.
790 spin_lock(&conn
->lock
);
792 if (++conn
->tx_ident
> 128)
797 spin_unlock(&conn
->lock
);
802 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
805 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
808 BT_DBG("code 0x%2.2x", code
);
813 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
814 flags
= ACL_START_NO_FLUSH
;
818 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
819 skb
->priority
= HCI_PRIO_MAX
;
821 hci_send_acl(conn
->hchan
, skb
, flags
);
824 static bool __chan_is_moving(struct l2cap_chan
*chan
)
826 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
827 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
830 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
832 struct hci_conn
*hcon
= chan
->conn
->hcon
;
835 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
838 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
840 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
847 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
848 lmp_no_flush_capable(hcon
->hdev
))
849 flags
= ACL_START_NO_FLUSH
;
853 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
854 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
857 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
859 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
860 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
862 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
865 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
866 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
873 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
874 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
881 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
883 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
884 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
886 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
889 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
890 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
897 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
898 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
905 static inline void __unpack_control(struct l2cap_chan
*chan
,
908 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
909 __unpack_extended_control(get_unaligned_le32(skb
->data
),
910 &bt_cb(skb
)->control
);
911 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
913 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
914 &bt_cb(skb
)->control
);
915 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
919 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
923 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
924 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
926 if (control
->sframe
) {
927 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
928 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
929 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
931 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
932 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
938 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
942 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
943 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
945 if (control
->sframe
) {
946 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
947 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
948 packed
|= L2CAP_CTRL_FRAME_TYPE
;
950 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
951 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
957 static inline void __pack_control(struct l2cap_chan
*chan
,
958 struct l2cap_ctrl
*control
,
961 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
962 put_unaligned_le32(__pack_extended_control(control
),
963 skb
->data
+ L2CAP_HDR_SIZE
);
965 put_unaligned_le16(__pack_enhanced_control(control
),
966 skb
->data
+ L2CAP_HDR_SIZE
);
970 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
972 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
973 return L2CAP_EXT_HDR_SIZE
;
975 return L2CAP_ENH_HDR_SIZE
;
978 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
982 struct l2cap_hdr
*lh
;
983 int hlen
= __ertm_hdr_size(chan
);
985 if (chan
->fcs
== L2CAP_FCS_CRC16
)
986 hlen
+= L2CAP_FCS_SIZE
;
988 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
991 return ERR_PTR(-ENOMEM
);
993 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
994 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
995 lh
->cid
= cpu_to_le16(chan
->dcid
);
997 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
998 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
1000 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
1002 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1003 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
1004 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1007 skb
->priority
= HCI_PRIO_MAX
;
1011 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
1012 struct l2cap_ctrl
*control
)
1014 struct sk_buff
*skb
;
1017 BT_DBG("chan %p, control %p", chan
, control
);
1019 if (!control
->sframe
)
1022 if (__chan_is_moving(chan
))
1025 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
1029 if (control
->super
== L2CAP_SUPER_RR
)
1030 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1031 else if (control
->super
== L2CAP_SUPER_RNR
)
1032 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1034 if (control
->super
!= L2CAP_SUPER_SREJ
) {
1035 chan
->last_acked_seq
= control
->reqseq
;
1036 __clear_ack_timer(chan
);
1039 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1040 control
->final
, control
->poll
, control
->super
);
1042 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1043 control_field
= __pack_extended_control(control
);
1045 control_field
= __pack_enhanced_control(control
);
1047 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1049 l2cap_do_send(chan
, skb
);
1052 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1054 struct l2cap_ctrl control
;
1056 BT_DBG("chan %p, poll %d", chan
, poll
);
1058 memset(&control
, 0, sizeof(control
));
1060 control
.poll
= poll
;
1062 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1063 control
.super
= L2CAP_SUPER_RNR
;
1065 control
.super
= L2CAP_SUPER_RR
;
1067 control
.reqseq
= chan
->buffer_seq
;
1068 l2cap_send_sframe(chan
, &control
);
1071 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1073 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1076 static bool __amp_capable(struct l2cap_chan
*chan
)
1078 struct l2cap_conn
*conn
= chan
->conn
;
1079 struct hci_dev
*hdev
;
1080 bool amp_available
= false;
1082 if (!conn
->hs_enabled
)
1085 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1088 read_lock(&hci_dev_list_lock
);
1089 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1090 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1091 test_bit(HCI_UP
, &hdev
->flags
)) {
1092 amp_available
= true;
1096 read_unlock(&hci_dev_list_lock
);
1098 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1099 return amp_available
;
1104 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1106 /* Check EFS parameters */
1110 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1112 struct l2cap_conn
*conn
= chan
->conn
;
1113 struct l2cap_conn_req req
;
1115 req
.scid
= cpu_to_le16(chan
->scid
);
1116 req
.psm
= chan
->psm
;
1118 chan
->ident
= l2cap_get_ident(conn
);
1120 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1122 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1125 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1127 struct l2cap_create_chan_req req
;
1128 req
.scid
= cpu_to_le16(chan
->scid
);
1129 req
.psm
= chan
->psm
;
1130 req
.amp_id
= amp_id
;
1132 chan
->ident
= l2cap_get_ident(chan
->conn
);
1134 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1138 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1140 struct sk_buff
*skb
;
1142 BT_DBG("chan %p", chan
);
1144 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1147 __clear_retrans_timer(chan
);
1148 __clear_monitor_timer(chan
);
1149 __clear_ack_timer(chan
);
1151 chan
->retry_count
= 0;
1152 skb_queue_walk(&chan
->tx_q
, skb
) {
1153 if (bt_cb(skb
)->control
.retries
)
1154 bt_cb(skb
)->control
.retries
= 1;
1159 chan
->expected_tx_seq
= chan
->buffer_seq
;
1161 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1162 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1163 l2cap_seq_list_clear(&chan
->retrans_list
);
1164 l2cap_seq_list_clear(&chan
->srej_list
);
1165 skb_queue_purge(&chan
->srej_q
);
1167 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1168 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1170 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1173 static void l2cap_move_done(struct l2cap_chan
*chan
)
1175 u8 move_role
= chan
->move_role
;
1176 BT_DBG("chan %p", chan
);
1178 chan
->move_state
= L2CAP_MOVE_STABLE
;
1179 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1181 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1184 switch (move_role
) {
1185 case L2CAP_MOVE_ROLE_INITIATOR
:
1186 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1187 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1189 case L2CAP_MOVE_ROLE_RESPONDER
:
1190 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1195 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1197 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1198 chan
->conf_state
= 0;
1199 __clear_chan_timer(chan
);
1201 if (chan
->mode
== L2CAP_MODE_LE_FLOWCTL
&& !chan
->tx_credits
)
1202 chan
->ops
->suspend(chan
);
1204 chan
->state
= BT_CONNECTED
;
1206 chan
->ops
->ready(chan
);
1209 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1211 struct l2cap_conn
*conn
= chan
->conn
;
1212 struct l2cap_le_conn_req req
;
1214 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT
, &chan
->flags
))
1217 req
.psm
= chan
->psm
;
1218 req
.scid
= cpu_to_le16(chan
->scid
);
1219 req
.mtu
= cpu_to_le16(chan
->imtu
);
1220 req
.mps
= cpu_to_le16(chan
->mps
);
1221 req
.credits
= cpu_to_le16(chan
->rx_credits
);
1223 chan
->ident
= l2cap_get_ident(conn
);
1225 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1229 static void l2cap_le_start(struct l2cap_chan
*chan
)
1231 struct l2cap_conn
*conn
= chan
->conn
;
1233 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1237 l2cap_chan_ready(chan
);
1241 if (chan
->state
== BT_CONNECT
)
1242 l2cap_le_connect(chan
);
1245 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1247 if (__amp_capable(chan
)) {
1248 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1249 a2mp_discover_amp(chan
);
1250 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1251 l2cap_le_start(chan
);
1253 l2cap_send_conn_req(chan
);
1257 static void l2cap_do_start(struct l2cap_chan
*chan
)
1259 struct l2cap_conn
*conn
= chan
->conn
;
1261 if (conn
->hcon
->type
== LE_LINK
) {
1262 l2cap_le_start(chan
);
1266 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1267 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1270 if (l2cap_chan_check_security(chan
) &&
1271 __l2cap_no_conn_pending(chan
)) {
1272 l2cap_start_connection(chan
);
1275 struct l2cap_info_req req
;
1276 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1278 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1279 conn
->info_ident
= l2cap_get_ident(conn
);
1281 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1283 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1288 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1290 u32 local_feat_mask
= l2cap_feat_mask
;
1292 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1295 case L2CAP_MODE_ERTM
:
1296 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1297 case L2CAP_MODE_STREAMING
:
1298 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1304 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1306 struct l2cap_conn
*conn
= chan
->conn
;
1307 struct l2cap_disconn_req req
;
1312 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1313 __clear_retrans_timer(chan
);
1314 __clear_monitor_timer(chan
);
1315 __clear_ack_timer(chan
);
1318 if (chan
->scid
== L2CAP_CID_A2MP
) {
1319 l2cap_state_change(chan
, BT_DISCONN
);
1323 req
.dcid
= cpu_to_le16(chan
->dcid
);
1324 req
.scid
= cpu_to_le16(chan
->scid
);
1325 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1328 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1331 /* ---- L2CAP connections ---- */
1332 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1334 struct l2cap_chan
*chan
, *tmp
;
1336 BT_DBG("conn %p", conn
);
1338 mutex_lock(&conn
->chan_lock
);
1340 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1341 l2cap_chan_lock(chan
);
1343 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1344 l2cap_chan_unlock(chan
);
1348 if (chan
->state
== BT_CONNECT
) {
1349 if (!l2cap_chan_check_security(chan
) ||
1350 !__l2cap_no_conn_pending(chan
)) {
1351 l2cap_chan_unlock(chan
);
1355 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1356 && test_bit(CONF_STATE2_DEVICE
,
1357 &chan
->conf_state
)) {
1358 l2cap_chan_close(chan
, ECONNRESET
);
1359 l2cap_chan_unlock(chan
);
1363 l2cap_start_connection(chan
);
1365 } else if (chan
->state
== BT_CONNECT2
) {
1366 struct l2cap_conn_rsp rsp
;
1368 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1369 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1371 if (l2cap_chan_check_security(chan
)) {
1372 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1373 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1374 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1375 chan
->ops
->defer(chan
);
1378 l2cap_state_change(chan
, BT_CONFIG
);
1379 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1380 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1383 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1384 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1387 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1390 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1391 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1392 l2cap_chan_unlock(chan
);
1396 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1397 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1398 l2cap_build_conf_req(chan
, buf
), buf
);
1399 chan
->num_conf_req
++;
1402 l2cap_chan_unlock(chan
);
1405 mutex_unlock(&conn
->chan_lock
);
1408 /* Find socket with cid and source/destination bdaddr.
1409 * Returns closest match, locked.
1411 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1415 struct l2cap_chan
*c
, *c1
= NULL
;
1417 read_lock(&chan_list_lock
);
1419 list_for_each_entry(c
, &chan_list
, global_l
) {
1420 if (state
&& c
->state
!= state
)
1423 if (c
->scid
== cid
) {
1424 int src_match
, dst_match
;
1425 int src_any
, dst_any
;
1428 src_match
= !bacmp(&c
->src
, src
);
1429 dst_match
= !bacmp(&c
->dst
, dst
);
1430 if (src_match
&& dst_match
) {
1431 read_unlock(&chan_list_lock
);
1436 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1437 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1438 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1439 (src_any
&& dst_any
))
1444 read_unlock(&chan_list_lock
);
1449 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1451 struct hci_conn
*hcon
= conn
->hcon
;
1452 struct l2cap_chan
*chan
, *pchan
;
1457 bt_6lowpan_add_conn(conn
);
1459 /* Check if we have socket listening on cid */
1460 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1461 &hcon
->src
, &hcon
->dst
);
1465 /* Client ATT sockets should override the server one */
1466 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1469 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
1471 /* If device is blocked, do not create a channel for it */
1472 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, dst_type
))
1475 l2cap_chan_lock(pchan
);
1477 chan
= pchan
->ops
->new_connection(pchan
);
1481 bacpy(&chan
->src
, &hcon
->src
);
1482 bacpy(&chan
->dst
, &hcon
->dst
);
1483 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1484 chan
->dst_type
= dst_type
;
1486 __l2cap_chan_add(conn
, chan
);
1489 l2cap_chan_unlock(pchan
);
1492 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1494 struct l2cap_chan
*chan
;
1495 struct hci_conn
*hcon
= conn
->hcon
;
1497 BT_DBG("conn %p", conn
);
1499 /* For outgoing pairing which doesn't necessarily have an
1500 * associated socket (e.g. mgmt_pair_device).
1502 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1503 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1505 mutex_lock(&conn
->chan_lock
);
1507 if (hcon
->type
== LE_LINK
)
1508 l2cap_le_conn_ready(conn
);
1510 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1512 l2cap_chan_lock(chan
);
1514 if (chan
->scid
== L2CAP_CID_A2MP
) {
1515 l2cap_chan_unlock(chan
);
1519 if (hcon
->type
== LE_LINK
) {
1520 l2cap_le_start(chan
);
1521 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1522 l2cap_chan_ready(chan
);
1524 } else if (chan
->state
== BT_CONNECT
) {
1525 l2cap_do_start(chan
);
1528 l2cap_chan_unlock(chan
);
1531 mutex_unlock(&conn
->chan_lock
);
1533 queue_work(hcon
->hdev
->workqueue
, &conn
->pending_rx_work
);
1536 /* Notify sockets that we cannot guaranty reliability anymore */
1537 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1539 struct l2cap_chan
*chan
;
1541 BT_DBG("conn %p", conn
);
1543 mutex_lock(&conn
->chan_lock
);
1545 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1546 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1547 l2cap_chan_set_err(chan
, err
);
1550 mutex_unlock(&conn
->chan_lock
);
1553 static void l2cap_info_timeout(struct work_struct
*work
)
1555 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1558 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1559 conn
->info_ident
= 0;
1561 l2cap_conn_start(conn
);
1566 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1567 * callback is called during registration. The ->remove callback is called
1568 * during unregistration.
1569 * An l2cap_user object can either be explicitly unregistered or when the
1570 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1571 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1572 * External modules must own a reference to the l2cap_conn object if they intend
1573 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1574 * any time if they don't.
1577 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1579 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1582 /* We need to check whether l2cap_conn is registered. If it is not, we
1583 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1584 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1585 * relies on the parent hci_conn object to be locked. This itself relies
1586 * on the hci_dev object to be locked. So we must lock the hci device
1591 if (user
->list
.next
|| user
->list
.prev
) {
1596 /* conn->hchan is NULL after l2cap_conn_del() was called */
1602 ret
= user
->probe(conn
, user
);
1606 list_add(&user
->list
, &conn
->users
);
1610 hci_dev_unlock(hdev
);
1613 EXPORT_SYMBOL(l2cap_register_user
);
1615 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1617 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1621 if (!user
->list
.next
|| !user
->list
.prev
)
1624 list_del(&user
->list
);
1625 user
->list
.next
= NULL
;
1626 user
->list
.prev
= NULL
;
1627 user
->remove(conn
, user
);
1630 hci_dev_unlock(hdev
);
1632 EXPORT_SYMBOL(l2cap_unregister_user
);
1634 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1636 struct l2cap_user
*user
;
1638 while (!list_empty(&conn
->users
)) {
1639 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1640 list_del(&user
->list
);
1641 user
->list
.next
= NULL
;
1642 user
->list
.prev
= NULL
;
1643 user
->remove(conn
, user
);
1647 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1649 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1650 struct l2cap_chan
*chan
, *l
;
1655 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1657 kfree_skb(conn
->rx_skb
);
1659 skb_queue_purge(&conn
->pending_rx
);
1660 flush_work(&conn
->pending_rx_work
);
1662 l2cap_unregister_all_users(conn
);
1664 mutex_lock(&conn
->chan_lock
);
1667 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1668 l2cap_chan_hold(chan
);
1669 l2cap_chan_lock(chan
);
1671 l2cap_chan_del(chan
, err
);
1673 l2cap_chan_unlock(chan
);
1675 chan
->ops
->close(chan
);
1676 l2cap_chan_put(chan
);
1679 mutex_unlock(&conn
->chan_lock
);
1681 hci_chan_del(conn
->hchan
);
1683 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1684 cancel_delayed_work_sync(&conn
->info_timer
);
1686 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1687 cancel_delayed_work_sync(&conn
->security_timer
);
1688 smp_chan_destroy(conn
);
1691 hcon
->l2cap_data
= NULL
;
1693 l2cap_conn_put(conn
);
1696 static void security_timeout(struct work_struct
*work
)
1698 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1699 security_timer
.work
);
1701 BT_DBG("conn %p", conn
);
1703 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1704 smp_chan_destroy(conn
);
1705 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1709 static void l2cap_conn_free(struct kref
*ref
)
1711 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1713 hci_conn_put(conn
->hcon
);
1717 void l2cap_conn_get(struct l2cap_conn
*conn
)
1719 kref_get(&conn
->ref
);
1721 EXPORT_SYMBOL(l2cap_conn_get
);
1723 void l2cap_conn_put(struct l2cap_conn
*conn
)
1725 kref_put(&conn
->ref
, l2cap_conn_free
);
1727 EXPORT_SYMBOL(l2cap_conn_put
);
1729 /* ---- Socket interface ---- */
1731 /* Find socket with psm and source / destination bdaddr.
1732 * Returns closest match.
1734 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1739 struct l2cap_chan
*c
, *c1
= NULL
;
1741 read_lock(&chan_list_lock
);
1743 list_for_each_entry(c
, &chan_list
, global_l
) {
1744 if (state
&& c
->state
!= state
)
1747 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1750 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1753 if (c
->psm
== psm
) {
1754 int src_match
, dst_match
;
1755 int src_any
, dst_any
;
1758 src_match
= !bacmp(&c
->src
, src
);
1759 dst_match
= !bacmp(&c
->dst
, dst
);
1760 if (src_match
&& dst_match
) {
1761 read_unlock(&chan_list_lock
);
1766 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1767 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1768 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1769 (src_any
&& dst_any
))
1774 read_unlock(&chan_list_lock
);
1779 static void l2cap_monitor_timeout(struct work_struct
*work
)
1781 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1782 monitor_timer
.work
);
1784 BT_DBG("chan %p", chan
);
1786 l2cap_chan_lock(chan
);
1789 l2cap_chan_unlock(chan
);
1790 l2cap_chan_put(chan
);
1794 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1796 l2cap_chan_unlock(chan
);
1797 l2cap_chan_put(chan
);
1800 static void l2cap_retrans_timeout(struct work_struct
*work
)
1802 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1803 retrans_timer
.work
);
1805 BT_DBG("chan %p", chan
);
1807 l2cap_chan_lock(chan
);
1810 l2cap_chan_unlock(chan
);
1811 l2cap_chan_put(chan
);
1815 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1816 l2cap_chan_unlock(chan
);
1817 l2cap_chan_put(chan
);
1820 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1821 struct sk_buff_head
*skbs
)
1823 struct sk_buff
*skb
;
1824 struct l2cap_ctrl
*control
;
1826 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1828 if (__chan_is_moving(chan
))
1831 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1833 while (!skb_queue_empty(&chan
->tx_q
)) {
1835 skb
= skb_dequeue(&chan
->tx_q
);
1837 bt_cb(skb
)->control
.retries
= 1;
1838 control
= &bt_cb(skb
)->control
;
1840 control
->reqseq
= 0;
1841 control
->txseq
= chan
->next_tx_seq
;
1843 __pack_control(chan
, control
, skb
);
1845 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1846 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1847 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1850 l2cap_do_send(chan
, skb
);
1852 BT_DBG("Sent txseq %u", control
->txseq
);
1854 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1855 chan
->frames_sent
++;
1859 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1861 struct sk_buff
*skb
, *tx_skb
;
1862 struct l2cap_ctrl
*control
;
1865 BT_DBG("chan %p", chan
);
1867 if (chan
->state
!= BT_CONNECTED
)
1870 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1873 if (__chan_is_moving(chan
))
1876 while (chan
->tx_send_head
&&
1877 chan
->unacked_frames
< chan
->remote_tx_win
&&
1878 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1880 skb
= chan
->tx_send_head
;
1882 bt_cb(skb
)->control
.retries
= 1;
1883 control
= &bt_cb(skb
)->control
;
1885 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1888 control
->reqseq
= chan
->buffer_seq
;
1889 chan
->last_acked_seq
= chan
->buffer_seq
;
1890 control
->txseq
= chan
->next_tx_seq
;
1892 __pack_control(chan
, control
, skb
);
1894 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1895 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1896 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1899 /* Clone after data has been modified. Data is assumed to be
1900 read-only (for locking purposes) on cloned sk_buffs.
1902 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1907 __set_retrans_timer(chan
);
1909 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1910 chan
->unacked_frames
++;
1911 chan
->frames_sent
++;
1914 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1915 chan
->tx_send_head
= NULL
;
1917 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1919 l2cap_do_send(chan
, tx_skb
);
1920 BT_DBG("Sent txseq %u", control
->txseq
);
1923 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1924 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1929 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1931 struct l2cap_ctrl control
;
1932 struct sk_buff
*skb
;
1933 struct sk_buff
*tx_skb
;
1936 BT_DBG("chan %p", chan
);
1938 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1941 if (__chan_is_moving(chan
))
1944 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1945 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1947 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1949 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1954 bt_cb(skb
)->control
.retries
++;
1955 control
= bt_cb(skb
)->control
;
1957 if (chan
->max_tx
!= 0 &&
1958 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1959 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1960 l2cap_send_disconn_req(chan
, ECONNRESET
);
1961 l2cap_seq_list_clear(&chan
->retrans_list
);
1965 control
.reqseq
= chan
->buffer_seq
;
1966 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1971 if (skb_cloned(skb
)) {
1972 /* Cloned sk_buffs are read-only, so we need a
1975 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1977 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1981 l2cap_seq_list_clear(&chan
->retrans_list
);
1985 /* Update skb contents */
1986 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1987 put_unaligned_le32(__pack_extended_control(&control
),
1988 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1990 put_unaligned_le16(__pack_enhanced_control(&control
),
1991 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1994 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1995 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1996 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2000 l2cap_do_send(chan
, tx_skb
);
2002 BT_DBG("Resent txseq %d", control
.txseq
);
2004 chan
->last_acked_seq
= chan
->buffer_seq
;
2008 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2009 struct l2cap_ctrl
*control
)
2011 BT_DBG("chan %p, control %p", chan
, control
);
2013 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2014 l2cap_ertm_resend(chan
);
2017 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2018 struct l2cap_ctrl
*control
)
2020 struct sk_buff
*skb
;
2022 BT_DBG("chan %p, control %p", chan
, control
);
2025 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2027 l2cap_seq_list_clear(&chan
->retrans_list
);
2029 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2032 if (chan
->unacked_frames
) {
2033 skb_queue_walk(&chan
->tx_q
, skb
) {
2034 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2035 skb
== chan
->tx_send_head
)
2039 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2040 if (skb
== chan
->tx_send_head
)
2043 l2cap_seq_list_append(&chan
->retrans_list
,
2044 bt_cb(skb
)->control
.txseq
);
2047 l2cap_ertm_resend(chan
);
2051 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2053 struct l2cap_ctrl control
;
2054 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2055 chan
->last_acked_seq
);
2058 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2059 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2061 memset(&control
, 0, sizeof(control
));
2064 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2065 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2066 __clear_ack_timer(chan
);
2067 control
.super
= L2CAP_SUPER_RNR
;
2068 control
.reqseq
= chan
->buffer_seq
;
2069 l2cap_send_sframe(chan
, &control
);
2071 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2072 l2cap_ertm_send(chan
);
2073 /* If any i-frames were sent, they included an ack */
2074 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2078 /* Ack now if the window is 3/4ths full.
2079 * Calculate without mul or div
2081 threshold
= chan
->ack_win
;
2082 threshold
+= threshold
<< 1;
2085 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2088 if (frames_to_ack
>= threshold
) {
2089 __clear_ack_timer(chan
);
2090 control
.super
= L2CAP_SUPER_RR
;
2091 control
.reqseq
= chan
->buffer_seq
;
2092 l2cap_send_sframe(chan
, &control
);
2097 __set_ack_timer(chan
);
2101 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2102 struct msghdr
*msg
, int len
,
2103 int count
, struct sk_buff
*skb
)
2105 struct l2cap_conn
*conn
= chan
->conn
;
2106 struct sk_buff
**frag
;
2109 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2115 /* Continuation fragments (no L2CAP header) */
2116 frag
= &skb_shinfo(skb
)->frag_list
;
2118 struct sk_buff
*tmp
;
2120 count
= min_t(unsigned int, conn
->mtu
, len
);
2122 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2123 msg
->msg_flags
& MSG_DONTWAIT
);
2125 return PTR_ERR(tmp
);
2129 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2132 (*frag
)->priority
= skb
->priority
;
2137 skb
->len
+= (*frag
)->len
;
2138 skb
->data_len
+= (*frag
)->len
;
2140 frag
= &(*frag
)->next
;
2146 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2147 struct msghdr
*msg
, size_t len
,
2150 struct l2cap_conn
*conn
= chan
->conn
;
2151 struct sk_buff
*skb
;
2152 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2153 struct l2cap_hdr
*lh
;
2155 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan
,
2156 __le16_to_cpu(chan
->psm
), len
, priority
);
2158 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2160 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2161 msg
->msg_flags
& MSG_DONTWAIT
);
2165 skb
->priority
= priority
;
2167 /* Create L2CAP header */
2168 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2169 lh
->cid
= cpu_to_le16(chan
->dcid
);
2170 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2171 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2173 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2174 if (unlikely(err
< 0)) {
2176 return ERR_PTR(err
);
2181 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2182 struct msghdr
*msg
, size_t len
,
2185 struct l2cap_conn
*conn
= chan
->conn
;
2186 struct sk_buff
*skb
;
2188 struct l2cap_hdr
*lh
;
2190 BT_DBG("chan %p len %zu", chan
, len
);
2192 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2194 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2195 msg
->msg_flags
& MSG_DONTWAIT
);
2199 skb
->priority
= priority
;
2201 /* Create L2CAP header */
2202 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2203 lh
->cid
= cpu_to_le16(chan
->dcid
);
2204 lh
->len
= cpu_to_le16(len
);
2206 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2207 if (unlikely(err
< 0)) {
2209 return ERR_PTR(err
);
2214 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2215 struct msghdr
*msg
, size_t len
,
2218 struct l2cap_conn
*conn
= chan
->conn
;
2219 struct sk_buff
*skb
;
2220 int err
, count
, hlen
;
2221 struct l2cap_hdr
*lh
;
2223 BT_DBG("chan %p len %zu", chan
, len
);
2226 return ERR_PTR(-ENOTCONN
);
2228 hlen
= __ertm_hdr_size(chan
);
2231 hlen
+= L2CAP_SDULEN_SIZE
;
2233 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2234 hlen
+= L2CAP_FCS_SIZE
;
2236 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2238 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2239 msg
->msg_flags
& MSG_DONTWAIT
);
2243 /* Create L2CAP header */
2244 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2245 lh
->cid
= cpu_to_le16(chan
->dcid
);
2246 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2248 /* Control header is populated later */
2249 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2250 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2252 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2255 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2257 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2258 if (unlikely(err
< 0)) {
2260 return ERR_PTR(err
);
2263 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2264 bt_cb(skb
)->control
.retries
= 0;
2268 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2269 struct sk_buff_head
*seg_queue
,
2270 struct msghdr
*msg
, size_t len
)
2272 struct sk_buff
*skb
;
2277 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2279 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2280 * so fragmented skbs are not used. The HCI layer's handling
2281 * of fragmented skbs is not compatible with ERTM's queueing.
2284 /* PDU size is derived from the HCI MTU */
2285 pdu_len
= chan
->conn
->mtu
;
2287 /* Constrain PDU size for BR/EDR connections */
2289 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2291 /* Adjust for largest possible L2CAP overhead. */
2293 pdu_len
-= L2CAP_FCS_SIZE
;
2295 pdu_len
-= __ertm_hdr_size(chan
);
2297 /* Remote device may have requested smaller PDUs */
2298 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2300 if (len
<= pdu_len
) {
2301 sar
= L2CAP_SAR_UNSEGMENTED
;
2305 sar
= L2CAP_SAR_START
;
2307 pdu_len
-= L2CAP_SDULEN_SIZE
;
2311 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2314 __skb_queue_purge(seg_queue
);
2315 return PTR_ERR(skb
);
2318 bt_cb(skb
)->control
.sar
= sar
;
2319 __skb_queue_tail(seg_queue
, skb
);
2324 pdu_len
+= L2CAP_SDULEN_SIZE
;
2327 if (len
<= pdu_len
) {
2328 sar
= L2CAP_SAR_END
;
2331 sar
= L2CAP_SAR_CONTINUE
;
2338 static struct sk_buff
*l2cap_create_le_flowctl_pdu(struct l2cap_chan
*chan
,
2340 size_t len
, u16 sdulen
)
2342 struct l2cap_conn
*conn
= chan
->conn
;
2343 struct sk_buff
*skb
;
2344 int err
, count
, hlen
;
2345 struct l2cap_hdr
*lh
;
2347 BT_DBG("chan %p len %zu", chan
, len
);
2350 return ERR_PTR(-ENOTCONN
);
2352 hlen
= L2CAP_HDR_SIZE
;
2355 hlen
+= L2CAP_SDULEN_SIZE
;
2357 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2359 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2360 msg
->msg_flags
& MSG_DONTWAIT
);
2364 /* Create L2CAP header */
2365 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2366 lh
->cid
= cpu_to_le16(chan
->dcid
);
2367 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2370 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2372 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2373 if (unlikely(err
< 0)) {
2375 return ERR_PTR(err
);
2381 static int l2cap_segment_le_sdu(struct l2cap_chan
*chan
,
2382 struct sk_buff_head
*seg_queue
,
2383 struct msghdr
*msg
, size_t len
)
2385 struct sk_buff
*skb
;
2389 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2391 pdu_len
= chan
->conn
->mtu
- L2CAP_HDR_SIZE
;
2393 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2396 pdu_len
-= L2CAP_SDULEN_SIZE
;
2402 skb
= l2cap_create_le_flowctl_pdu(chan
, msg
, pdu_len
, sdu_len
);
2404 __skb_queue_purge(seg_queue
);
2405 return PTR_ERR(skb
);
2408 __skb_queue_tail(seg_queue
, skb
);
2414 pdu_len
+= L2CAP_SDULEN_SIZE
;
2421 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2424 struct sk_buff
*skb
;
2426 struct sk_buff_head seg_queue
;
2431 /* Connectionless channel */
2432 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2433 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2435 return PTR_ERR(skb
);
2437 /* Channel lock is released before requesting new skb and then
2438 * reacquired thus we need to recheck channel state.
2440 if (chan
->state
!= BT_CONNECTED
) {
2445 l2cap_do_send(chan
, skb
);
2449 switch (chan
->mode
) {
2450 case L2CAP_MODE_LE_FLOWCTL
:
2451 /* Check outgoing MTU */
2452 if (len
> chan
->omtu
)
2455 if (!chan
->tx_credits
)
2458 __skb_queue_head_init(&seg_queue
);
2460 err
= l2cap_segment_le_sdu(chan
, &seg_queue
, msg
, len
);
2462 if (chan
->state
!= BT_CONNECTED
) {
2463 __skb_queue_purge(&seg_queue
);
2470 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2472 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
2473 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
2477 if (!chan
->tx_credits
)
2478 chan
->ops
->suspend(chan
);
2484 case L2CAP_MODE_BASIC
:
2485 /* Check outgoing MTU */
2486 if (len
> chan
->omtu
)
2489 /* Create a basic PDU */
2490 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2492 return PTR_ERR(skb
);
2494 /* Channel lock is released before requesting new skb and then
2495 * reacquired thus we need to recheck channel state.
2497 if (chan
->state
!= BT_CONNECTED
) {
2502 l2cap_do_send(chan
, skb
);
2506 case L2CAP_MODE_ERTM
:
2507 case L2CAP_MODE_STREAMING
:
2508 /* Check outgoing MTU */
2509 if (len
> chan
->omtu
) {
2514 __skb_queue_head_init(&seg_queue
);
2516 /* Do segmentation before calling in to the state machine,
2517 * since it's possible to block while waiting for memory
2520 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2522 /* The channel could have been closed while segmenting,
2523 * check that it is still connected.
2525 if (chan
->state
!= BT_CONNECTED
) {
2526 __skb_queue_purge(&seg_queue
);
2533 if (chan
->mode
== L2CAP_MODE_ERTM
)
2534 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2536 l2cap_streaming_send(chan
, &seg_queue
);
2540 /* If the skbs were not queued for sending, they'll still be in
2541 * seg_queue and need to be purged.
2543 __skb_queue_purge(&seg_queue
);
2547 BT_DBG("bad state %1.1x", chan
->mode
);
2554 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2556 struct l2cap_ctrl control
;
2559 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2561 memset(&control
, 0, sizeof(control
));
2563 control
.super
= L2CAP_SUPER_SREJ
;
2565 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2566 seq
= __next_seq(chan
, seq
)) {
2567 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2568 control
.reqseq
= seq
;
2569 l2cap_send_sframe(chan
, &control
);
2570 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2574 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2577 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2579 struct l2cap_ctrl control
;
2581 BT_DBG("chan %p", chan
);
2583 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2586 memset(&control
, 0, sizeof(control
));
2588 control
.super
= L2CAP_SUPER_SREJ
;
2589 control
.reqseq
= chan
->srej_list
.tail
;
2590 l2cap_send_sframe(chan
, &control
);
2593 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2595 struct l2cap_ctrl control
;
2599 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2601 memset(&control
, 0, sizeof(control
));
2603 control
.super
= L2CAP_SUPER_SREJ
;
2605 /* Capture initial list head to allow only one pass through the list. */
2606 initial_head
= chan
->srej_list
.head
;
2609 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2610 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2613 control
.reqseq
= seq
;
2614 l2cap_send_sframe(chan
, &control
);
2615 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2616 } while (chan
->srej_list
.head
!= initial_head
);
2619 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2621 struct sk_buff
*acked_skb
;
2624 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2626 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2629 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2630 chan
->expected_ack_seq
, chan
->unacked_frames
);
2632 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2633 ackseq
= __next_seq(chan
, ackseq
)) {
2635 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2637 skb_unlink(acked_skb
, &chan
->tx_q
);
2638 kfree_skb(acked_skb
);
2639 chan
->unacked_frames
--;
2643 chan
->expected_ack_seq
= reqseq
;
2645 if (chan
->unacked_frames
== 0)
2646 __clear_retrans_timer(chan
);
2648 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2651 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2653 BT_DBG("chan %p", chan
);
2655 chan
->expected_tx_seq
= chan
->buffer_seq
;
2656 l2cap_seq_list_clear(&chan
->srej_list
);
2657 skb_queue_purge(&chan
->srej_q
);
2658 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2661 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2662 struct l2cap_ctrl
*control
,
2663 struct sk_buff_head
*skbs
, u8 event
)
2665 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2669 case L2CAP_EV_DATA_REQUEST
:
2670 if (chan
->tx_send_head
== NULL
)
2671 chan
->tx_send_head
= skb_peek(skbs
);
2673 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2674 l2cap_ertm_send(chan
);
2676 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2677 BT_DBG("Enter LOCAL_BUSY");
2678 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2680 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2681 /* The SREJ_SENT state must be aborted if we are to
2682 * enter the LOCAL_BUSY state.
2684 l2cap_abort_rx_srej_sent(chan
);
2687 l2cap_send_ack(chan
);
2690 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2691 BT_DBG("Exit LOCAL_BUSY");
2692 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2694 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2695 struct l2cap_ctrl local_control
;
2697 memset(&local_control
, 0, sizeof(local_control
));
2698 local_control
.sframe
= 1;
2699 local_control
.super
= L2CAP_SUPER_RR
;
2700 local_control
.poll
= 1;
2701 local_control
.reqseq
= chan
->buffer_seq
;
2702 l2cap_send_sframe(chan
, &local_control
);
2704 chan
->retry_count
= 1;
2705 __set_monitor_timer(chan
);
2706 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2709 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2710 l2cap_process_reqseq(chan
, control
->reqseq
);
2712 case L2CAP_EV_EXPLICIT_POLL
:
2713 l2cap_send_rr_or_rnr(chan
, 1);
2714 chan
->retry_count
= 1;
2715 __set_monitor_timer(chan
);
2716 __clear_ack_timer(chan
);
2717 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2719 case L2CAP_EV_RETRANS_TO
:
2720 l2cap_send_rr_or_rnr(chan
, 1);
2721 chan
->retry_count
= 1;
2722 __set_monitor_timer(chan
);
2723 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2725 case L2CAP_EV_RECV_FBIT
:
2726 /* Nothing to process */
2733 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2734 struct l2cap_ctrl
*control
,
2735 struct sk_buff_head
*skbs
, u8 event
)
2737 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2741 case L2CAP_EV_DATA_REQUEST
:
2742 if (chan
->tx_send_head
== NULL
)
2743 chan
->tx_send_head
= skb_peek(skbs
);
2744 /* Queue data, but don't send. */
2745 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2747 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2748 BT_DBG("Enter LOCAL_BUSY");
2749 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2751 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2752 /* The SREJ_SENT state must be aborted if we are to
2753 * enter the LOCAL_BUSY state.
2755 l2cap_abort_rx_srej_sent(chan
);
2758 l2cap_send_ack(chan
);
2761 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2762 BT_DBG("Exit LOCAL_BUSY");
2763 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2765 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2766 struct l2cap_ctrl local_control
;
2767 memset(&local_control
, 0, sizeof(local_control
));
2768 local_control
.sframe
= 1;
2769 local_control
.super
= L2CAP_SUPER_RR
;
2770 local_control
.poll
= 1;
2771 local_control
.reqseq
= chan
->buffer_seq
;
2772 l2cap_send_sframe(chan
, &local_control
);
2774 chan
->retry_count
= 1;
2775 __set_monitor_timer(chan
);
2776 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2779 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2780 l2cap_process_reqseq(chan
, control
->reqseq
);
2784 case L2CAP_EV_RECV_FBIT
:
2785 if (control
&& control
->final
) {
2786 __clear_monitor_timer(chan
);
2787 if (chan
->unacked_frames
> 0)
2788 __set_retrans_timer(chan
);
2789 chan
->retry_count
= 0;
2790 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2791 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2794 case L2CAP_EV_EXPLICIT_POLL
:
2797 case L2CAP_EV_MONITOR_TO
:
2798 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2799 l2cap_send_rr_or_rnr(chan
, 1);
2800 __set_monitor_timer(chan
);
2801 chan
->retry_count
++;
2803 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2811 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2812 struct sk_buff_head
*skbs
, u8 event
)
2814 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2815 chan
, control
, skbs
, event
, chan
->tx_state
);
2817 switch (chan
->tx_state
) {
2818 case L2CAP_TX_STATE_XMIT
:
2819 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2821 case L2CAP_TX_STATE_WAIT_F
:
2822 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2830 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2831 struct l2cap_ctrl
*control
)
2833 BT_DBG("chan %p, control %p", chan
, control
);
2834 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2837 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2838 struct l2cap_ctrl
*control
)
2840 BT_DBG("chan %p, control %p", chan
, control
);
2841 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2844 /* Copy frame to all raw sockets on that connection */
2845 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2847 struct sk_buff
*nskb
;
2848 struct l2cap_chan
*chan
;
2850 BT_DBG("conn %p", conn
);
2852 mutex_lock(&conn
->chan_lock
);
2854 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2855 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2858 /* Don't send frame to the channel it came from */
2859 if (bt_cb(skb
)->chan
== chan
)
2862 nskb
= skb_clone(skb
, GFP_KERNEL
);
2865 if (chan
->ops
->recv(chan
, nskb
))
2869 mutex_unlock(&conn
->chan_lock
);
2872 /* ---- L2CAP signalling commands ---- */
2873 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2874 u8 ident
, u16 dlen
, void *data
)
2876 struct sk_buff
*skb
, **frag
;
2877 struct l2cap_cmd_hdr
*cmd
;
2878 struct l2cap_hdr
*lh
;
2881 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2882 conn
, code
, ident
, dlen
);
2884 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2887 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2888 count
= min_t(unsigned int, conn
->mtu
, len
);
2890 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2894 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2895 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2897 if (conn
->hcon
->type
== LE_LINK
)
2898 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2900 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2902 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2905 cmd
->len
= cpu_to_le16(dlen
);
2908 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2909 memcpy(skb_put(skb
, count
), data
, count
);
2915 /* Continuation fragments (no L2CAP header) */
2916 frag
= &skb_shinfo(skb
)->frag_list
;
2918 count
= min_t(unsigned int, conn
->mtu
, len
);
2920 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2924 memcpy(skb_put(*frag
, count
), data
, count
);
2929 frag
= &(*frag
)->next
;
2939 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2942 struct l2cap_conf_opt
*opt
= *ptr
;
2945 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2953 *val
= *((u8
*) opt
->val
);
2957 *val
= get_unaligned_le16(opt
->val
);
2961 *val
= get_unaligned_le32(opt
->val
);
2965 *val
= (unsigned long) opt
->val
;
2969 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2973 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2975 struct l2cap_conf_opt
*opt
= *ptr
;
2977 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2984 *((u8
*) opt
->val
) = val
;
2988 put_unaligned_le16(val
, opt
->val
);
2992 put_unaligned_le32(val
, opt
->val
);
2996 memcpy(opt
->val
, (void *) val
, len
);
3000 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3003 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3005 struct l2cap_conf_efs efs
;
3007 switch (chan
->mode
) {
3008 case L2CAP_MODE_ERTM
:
3009 efs
.id
= chan
->local_id
;
3010 efs
.stype
= chan
->local_stype
;
3011 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3012 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3013 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3014 efs
.flush_to
= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3017 case L2CAP_MODE_STREAMING
:
3019 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3020 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3021 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3030 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3031 (unsigned long) &efs
);
3034 static void l2cap_ack_timeout(struct work_struct
*work
)
3036 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3040 BT_DBG("chan %p", chan
);
3042 l2cap_chan_lock(chan
);
3044 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3045 chan
->last_acked_seq
);
3048 l2cap_send_rr_or_rnr(chan
, 0);
3050 l2cap_chan_unlock(chan
);
3051 l2cap_chan_put(chan
);
3054 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3058 chan
->next_tx_seq
= 0;
3059 chan
->expected_tx_seq
= 0;
3060 chan
->expected_ack_seq
= 0;
3061 chan
->unacked_frames
= 0;
3062 chan
->buffer_seq
= 0;
3063 chan
->frames_sent
= 0;
3064 chan
->last_acked_seq
= 0;
3066 chan
->sdu_last_frag
= NULL
;
3069 skb_queue_head_init(&chan
->tx_q
);
3071 chan
->local_amp_id
= AMP_ID_BREDR
;
3072 chan
->move_id
= AMP_ID_BREDR
;
3073 chan
->move_state
= L2CAP_MOVE_STABLE
;
3074 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3076 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3079 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3080 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3082 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3083 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3084 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3086 skb_queue_head_init(&chan
->srej_q
);
3088 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3092 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3094 l2cap_seq_list_free(&chan
->srej_list
);
3099 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3102 case L2CAP_MODE_STREAMING
:
3103 case L2CAP_MODE_ERTM
:
3104 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3108 return L2CAP_MODE_BASIC
;
3112 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3114 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3117 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3119 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3122 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3123 struct l2cap_conf_rfc
*rfc
)
3125 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3126 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3128 /* Class 1 devices have must have ERTM timeouts
3129 * exceeding the Link Supervision Timeout. The
3130 * default Link Supervision Timeout for AMP
3131 * controllers is 10 seconds.
3133 * Class 1 devices use 0xffffffff for their
3134 * best-effort flush timeout, so the clamping logic
3135 * will result in a timeout that meets the above
3136 * requirement. ERTM timeouts are 16-bit values, so
3137 * the maximum timeout is 65.535 seconds.
3140 /* Convert timeout to milliseconds and round */
3141 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3143 /* This is the recommended formula for class 2 devices
3144 * that start ERTM timers when packets are sent to the
3147 ertm_to
= 3 * ertm_to
+ 500;
3149 if (ertm_to
> 0xffff)
3152 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3153 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3155 rfc
->retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3156 rfc
->monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3160 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3162 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3163 __l2cap_ews_supported(chan
->conn
)) {
3164 /* use extended control field */
3165 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3166 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3168 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3169 L2CAP_DEFAULT_TX_WINDOW
);
3170 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3172 chan
->ack_win
= chan
->tx_win
;
3175 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3177 struct l2cap_conf_req
*req
= data
;
3178 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3179 void *ptr
= req
->data
;
3182 BT_DBG("chan %p", chan
);
3184 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3187 switch (chan
->mode
) {
3188 case L2CAP_MODE_STREAMING
:
3189 case L2CAP_MODE_ERTM
:
3190 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3193 if (__l2cap_efs_supported(chan
->conn
))
3194 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3198 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3203 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3204 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3206 switch (chan
->mode
) {
3207 case L2CAP_MODE_BASIC
:
3208 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3209 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3212 rfc
.mode
= L2CAP_MODE_BASIC
;
3214 rfc
.max_transmit
= 0;
3215 rfc
.retrans_timeout
= 0;
3216 rfc
.monitor_timeout
= 0;
3217 rfc
.max_pdu_size
= 0;
3219 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3220 (unsigned long) &rfc
);
3223 case L2CAP_MODE_ERTM
:
3224 rfc
.mode
= L2CAP_MODE_ERTM
;
3225 rfc
.max_transmit
= chan
->max_tx
;
3227 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3229 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3230 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3232 rfc
.max_pdu_size
= cpu_to_le16(size
);
3234 l2cap_txwin_setup(chan
);
3236 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3237 L2CAP_DEFAULT_TX_WINDOW
);
3239 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3240 (unsigned long) &rfc
);
3242 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3243 l2cap_add_opt_efs(&ptr
, chan
);
3245 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3246 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3249 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3250 if (chan
->fcs
== L2CAP_FCS_NONE
||
3251 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3252 chan
->fcs
= L2CAP_FCS_NONE
;
3253 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3258 case L2CAP_MODE_STREAMING
:
3259 l2cap_txwin_setup(chan
);
3260 rfc
.mode
= L2CAP_MODE_STREAMING
;
3262 rfc
.max_transmit
= 0;
3263 rfc
.retrans_timeout
= 0;
3264 rfc
.monitor_timeout
= 0;
3266 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3267 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3269 rfc
.max_pdu_size
= cpu_to_le16(size
);
3271 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3272 (unsigned long) &rfc
);
3274 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3275 l2cap_add_opt_efs(&ptr
, chan
);
3277 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3278 if (chan
->fcs
== L2CAP_FCS_NONE
||
3279 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3280 chan
->fcs
= L2CAP_FCS_NONE
;
3281 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3287 req
->dcid
= cpu_to_le16(chan
->dcid
);
3288 req
->flags
= cpu_to_le16(0);
/* Parse the accumulated remote Configure Request in chan->conf_req and
 * build the Configure Response into *data.  Returns the response length,
 * or -ECONNREFUSED when the remote's parameters cannot be reconciled.
 *
 * Fix: each option's length (olen) is now validated against the size the
 * option type requires before the value is used.  Previously a truncated
 * or oversized option from the remote could be acted upon (e.g. a short
 * RFC/EFS copied over partially-initialized stack data, or a pointer
 * value interpreted as an MTU), mirroring the upstream kernel hardening
 * of l2cap_get_conf_opt callers.  A negative remaining length also
 * terminates the loop instead of underflowing.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Echo unknown non-hint option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the PDU size to what fits in our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3507 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3508 void *data
, u16
*result
)
3510 struct l2cap_conf_req
*req
= data
;
3511 void *ptr
= req
->data
;
3514 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3515 struct l2cap_conf_efs efs
;
3517 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3519 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3520 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3523 case L2CAP_CONF_MTU
:
3524 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3525 *result
= L2CAP_CONF_UNACCEPT
;
3526 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3529 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3532 case L2CAP_CONF_FLUSH_TO
:
3533 chan
->flush_to
= val
;
3534 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3538 case L2CAP_CONF_RFC
:
3539 if (olen
== sizeof(rfc
))
3540 memcpy(&rfc
, (void *)val
, olen
);
3542 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3543 rfc
.mode
!= chan
->mode
)
3544 return -ECONNREFUSED
;
3548 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3549 sizeof(rfc
), (unsigned long) &rfc
);
3552 case L2CAP_CONF_EWS
:
3553 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3554 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3558 case L2CAP_CONF_EFS
:
3559 if (olen
== sizeof(efs
))
3560 memcpy(&efs
, (void *)val
, olen
);
3562 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3563 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3564 efs
.stype
!= chan
->local_stype
)
3565 return -ECONNREFUSED
;
3567 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3568 (unsigned long) &efs
);
3571 case L2CAP_CONF_FCS
:
3572 if (*result
== L2CAP_CONF_PENDING
)
3573 if (val
== L2CAP_FCS_NONE
)
3574 set_bit(CONF_RECV_NO_FCS
,
3580 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3581 return -ECONNREFUSED
;
3583 chan
->mode
= rfc
.mode
;
3585 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3587 case L2CAP_MODE_ERTM
:
3588 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3589 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3590 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3591 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3592 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3595 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3596 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3597 chan
->local_sdu_itime
=
3598 le32_to_cpu(efs
.sdu_itime
);
3599 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3600 chan
->local_flush_to
=
3601 le32_to_cpu(efs
.flush_to
);
3605 case L2CAP_MODE_STREAMING
:
3606 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3610 req
->dcid
= cpu_to_le16(chan
->dcid
);
3611 req
->flags
= cpu_to_le16(0);
3616 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3617 u16 result
, u16 flags
)
3619 struct l2cap_conf_rsp
*rsp
= data
;
3620 void *ptr
= rsp
->data
;
3622 BT_DBG("chan %p", chan
);
3624 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3625 rsp
->result
= cpu_to_le16(result
);
3626 rsp
->flags
= cpu_to_le16(flags
);
3631 void __l2cap_le_connect_rsp_defer(struct l2cap_chan
*chan
)
3633 struct l2cap_le_conn_rsp rsp
;
3634 struct l2cap_conn
*conn
= chan
->conn
;
3636 BT_DBG("chan %p", chan
);
3638 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3639 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
3640 rsp
.mps
= cpu_to_le16(chan
->mps
);
3641 rsp
.credits
= cpu_to_le16(chan
->rx_credits
);
3642 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3644 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
),
3648 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3650 struct l2cap_conn_rsp rsp
;
3651 struct l2cap_conn
*conn
= chan
->conn
;
3655 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3656 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3657 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3658 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3661 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3663 rsp_code
= L2CAP_CONN_RSP
;
3665 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3667 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3669 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3672 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3673 l2cap_build_conf_req(chan
, buf
), buf
);
3674 chan
->num_conf_req
++;
3677 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3681 /* Use sane default values in case a misbehaving remote device
3682 * did not send an RFC or extended window size option.
3684 u16 txwin_ext
= chan
->ack_win
;
3685 struct l2cap_conf_rfc rfc
= {
3687 .retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3688 .monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3689 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3690 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3693 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3695 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3698 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3699 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3702 case L2CAP_CONF_RFC
:
3703 if (olen
== sizeof(rfc
))
3704 memcpy(&rfc
, (void *)val
, olen
);
3706 case L2CAP_CONF_EWS
:
3713 case L2CAP_MODE_ERTM
:
3714 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3715 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3716 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3717 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3718 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3720 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3723 case L2CAP_MODE_STREAMING
:
3724 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3728 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3729 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3732 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3734 if (cmd_len
< sizeof(*rej
))
3737 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3740 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3741 cmd
->ident
== conn
->info_ident
) {
3742 cancel_delayed_work(&conn
->info_timer
);
3744 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3745 conn
->info_ident
= 0;
3747 l2cap_conn_start(conn
);
3753 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3754 struct l2cap_cmd_hdr
*cmd
,
3755 u8
*data
, u8 rsp_code
, u8 amp_id
)
3757 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3758 struct l2cap_conn_rsp rsp
;
3759 struct l2cap_chan
*chan
= NULL
, *pchan
;
3760 int result
, status
= L2CAP_CS_NO_INFO
;
3762 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3763 __le16 psm
= req
->psm
;
3765 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3767 /* Check if we have socket listening on psm */
3768 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3769 &conn
->hcon
->dst
, ACL_LINK
);
3771 result
= L2CAP_CR_BAD_PSM
;
3775 mutex_lock(&conn
->chan_lock
);
3776 l2cap_chan_lock(pchan
);
3778 /* Check if the ACL is secure enough (if not SDP) */
3779 if (psm
!= cpu_to_le16(L2CAP_PSM_SDP
) &&
3780 !hci_conn_check_link_mode(conn
->hcon
)) {
3781 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3782 result
= L2CAP_CR_SEC_BLOCK
;
3786 result
= L2CAP_CR_NO_MEM
;
3788 /* Check if we already have channel with that dcid */
3789 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3792 chan
= pchan
->ops
->new_connection(pchan
);
3796 /* For certain devices (ex: HID mouse), support for authentication,
3797 * pairing and bonding is optional. For such devices, inorder to avoid
3798 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3799 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3801 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3803 bacpy(&chan
->src
, &conn
->hcon
->src
);
3804 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3805 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3806 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3809 chan
->local_amp_id
= amp_id
;
3811 __l2cap_chan_add(conn
, chan
);
3815 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3817 chan
->ident
= cmd
->ident
;
3819 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3820 if (l2cap_chan_check_security(chan
)) {
3821 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3822 l2cap_state_change(chan
, BT_CONNECT2
);
3823 result
= L2CAP_CR_PEND
;
3824 status
= L2CAP_CS_AUTHOR_PEND
;
3825 chan
->ops
->defer(chan
);
3827 /* Force pending result for AMP controllers.
3828 * The connection will succeed after the
3829 * physical link is up.
3831 if (amp_id
== AMP_ID_BREDR
) {
3832 l2cap_state_change(chan
, BT_CONFIG
);
3833 result
= L2CAP_CR_SUCCESS
;
3835 l2cap_state_change(chan
, BT_CONNECT2
);
3836 result
= L2CAP_CR_PEND
;
3838 status
= L2CAP_CS_NO_INFO
;
3841 l2cap_state_change(chan
, BT_CONNECT2
);
3842 result
= L2CAP_CR_PEND
;
3843 status
= L2CAP_CS_AUTHEN_PEND
;
3846 l2cap_state_change(chan
, BT_CONNECT2
);
3847 result
= L2CAP_CR_PEND
;
3848 status
= L2CAP_CS_NO_INFO
;
3852 l2cap_chan_unlock(pchan
);
3853 mutex_unlock(&conn
->chan_lock
);
3856 rsp
.scid
= cpu_to_le16(scid
);
3857 rsp
.dcid
= cpu_to_le16(dcid
);
3858 rsp
.result
= cpu_to_le16(result
);
3859 rsp
.status
= cpu_to_le16(status
);
3860 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3862 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3863 struct l2cap_info_req info
;
3864 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3866 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3867 conn
->info_ident
= l2cap_get_ident(conn
);
3869 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3871 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3872 sizeof(info
), &info
);
3875 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3876 result
== L2CAP_CR_SUCCESS
) {
3878 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3879 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3880 l2cap_build_conf_req(chan
, buf
), buf
);
3881 chan
->num_conf_req
++;
3887 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3888 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3890 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3891 struct hci_conn
*hcon
= conn
->hcon
;
3893 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3897 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3898 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3899 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3900 hcon
->dst_type
, 0, NULL
, 0,
3902 hci_dev_unlock(hdev
);
3904 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3908 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3909 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3912 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3913 u16 scid
, dcid
, result
, status
;
3914 struct l2cap_chan
*chan
;
3918 if (cmd_len
< sizeof(*rsp
))
3921 scid
= __le16_to_cpu(rsp
->scid
);
3922 dcid
= __le16_to_cpu(rsp
->dcid
);
3923 result
= __le16_to_cpu(rsp
->result
);
3924 status
= __le16_to_cpu(rsp
->status
);
3926 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3927 dcid
, scid
, result
, status
);
3929 mutex_lock(&conn
->chan_lock
);
3932 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3938 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3947 l2cap_chan_lock(chan
);
3950 case L2CAP_CR_SUCCESS
:
3951 l2cap_state_change(chan
, BT_CONFIG
);
3954 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3956 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3959 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3960 l2cap_build_conf_req(chan
, req
), req
);
3961 chan
->num_conf_req
++;
3965 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3969 l2cap_chan_del(chan
, ECONNREFUSED
);
3973 l2cap_chan_unlock(chan
);
3976 mutex_unlock(&conn
->chan_lock
);
3981 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3983 /* FCS is enabled only in ERTM or streaming mode, if one or both
3986 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3987 chan
->fcs
= L2CAP_FCS_NONE
;
3988 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3989 chan
->fcs
= L2CAP_FCS_CRC16
;
3992 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3993 u8 ident
, u16 flags
)
3995 struct l2cap_conn
*conn
= chan
->conn
;
3997 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
4000 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
4001 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4003 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4004 l2cap_build_conf_rsp(chan
, data
,
4005 L2CAP_CONF_SUCCESS
, flags
), data
);
4008 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4011 struct l2cap_cmd_rej_cid rej
;
4013 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4014 rej
.scid
= __cpu_to_le16(scid
);
4015 rej
.dcid
= __cpu_to_le16(dcid
);
4017 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4020 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4021 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4024 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4027 struct l2cap_chan
*chan
;
4030 if (cmd_len
< sizeof(*req
))
4033 dcid
= __le16_to_cpu(req
->dcid
);
4034 flags
= __le16_to_cpu(req
->flags
);
4036 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4038 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4040 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4044 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4045 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4050 /* Reject if config buffer is too small. */
4051 len
= cmd_len
- sizeof(*req
);
4052 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4053 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4054 l2cap_build_conf_rsp(chan
, rsp
,
4055 L2CAP_CONF_REJECT
, flags
), rsp
);
4060 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4061 chan
->conf_len
+= len
;
4063 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4064 /* Incomplete config. Send empty response. */
4065 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4066 l2cap_build_conf_rsp(chan
, rsp
,
4067 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4071 /* Complete config. */
4072 len
= l2cap_parse_conf_req(chan
, rsp
);
4074 l2cap_send_disconn_req(chan
, ECONNRESET
);
4078 chan
->ident
= cmd
->ident
;
4079 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4080 chan
->num_conf_rsp
++;
4082 /* Reset config buffer. */
4085 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4088 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4089 set_default_fcs(chan
);
4091 if (chan
->mode
== L2CAP_MODE_ERTM
||
4092 chan
->mode
== L2CAP_MODE_STREAMING
)
4093 err
= l2cap_ertm_init(chan
);
4096 l2cap_send_disconn_req(chan
, -err
);
4098 l2cap_chan_ready(chan
);
4103 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4105 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4106 l2cap_build_conf_req(chan
, buf
), buf
);
4107 chan
->num_conf_req
++;
4110 /* Got Conf Rsp PENDING from remote side and asume we sent
4111 Conf Rsp PENDING in the code above */
4112 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4113 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4115 /* check compatibility */
4117 /* Send rsp for BR/EDR channel */
4119 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4121 chan
->ident
= cmd
->ident
;
4125 l2cap_chan_unlock(chan
);
4129 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4130 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4133 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4134 u16 scid
, flags
, result
;
4135 struct l2cap_chan
*chan
;
4136 int len
= cmd_len
- sizeof(*rsp
);
4139 if (cmd_len
< sizeof(*rsp
))
4142 scid
= __le16_to_cpu(rsp
->scid
);
4143 flags
= __le16_to_cpu(rsp
->flags
);
4144 result
= __le16_to_cpu(rsp
->result
);
4146 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4149 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4154 case L2CAP_CONF_SUCCESS
:
4155 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4156 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4159 case L2CAP_CONF_PENDING
:
4160 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4162 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4165 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4168 l2cap_send_disconn_req(chan
, ECONNRESET
);
4172 if (!chan
->hs_hcon
) {
4173 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4176 if (l2cap_check_efs(chan
)) {
4177 amp_create_logical_link(chan
);
4178 chan
->ident
= cmd
->ident
;
4184 case L2CAP_CONF_UNACCEPT
:
4185 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4188 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4189 l2cap_send_disconn_req(chan
, ECONNRESET
);
4193 /* throw out any old stored conf requests */
4194 result
= L2CAP_CONF_SUCCESS
;
4195 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4198 l2cap_send_disconn_req(chan
, ECONNRESET
);
4202 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4203 L2CAP_CONF_REQ
, len
, req
);
4204 chan
->num_conf_req
++;
4205 if (result
!= L2CAP_CONF_SUCCESS
)
4211 l2cap_chan_set_err(chan
, ECONNRESET
);
4213 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4214 l2cap_send_disconn_req(chan
, ECONNRESET
);
4218 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4221 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4223 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4224 set_default_fcs(chan
);
4226 if (chan
->mode
== L2CAP_MODE_ERTM
||
4227 chan
->mode
== L2CAP_MODE_STREAMING
)
4228 err
= l2cap_ertm_init(chan
);
4231 l2cap_send_disconn_req(chan
, -err
);
4233 l2cap_chan_ready(chan
);
4237 l2cap_chan_unlock(chan
);
4241 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4242 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4245 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4246 struct l2cap_disconn_rsp rsp
;
4248 struct l2cap_chan
*chan
;
4250 if (cmd_len
!= sizeof(*req
))
4253 scid
= __le16_to_cpu(req
->scid
);
4254 dcid
= __le16_to_cpu(req
->dcid
);
4256 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4258 mutex_lock(&conn
->chan_lock
);
4260 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4262 mutex_unlock(&conn
->chan_lock
);
4263 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4267 l2cap_chan_lock(chan
);
4269 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4270 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4271 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4273 chan
->ops
->set_shutdown(chan
);
4275 l2cap_chan_hold(chan
);
4276 l2cap_chan_del(chan
, ECONNRESET
);
4278 l2cap_chan_unlock(chan
);
4280 chan
->ops
->close(chan
);
4281 l2cap_chan_put(chan
);
4283 mutex_unlock(&conn
->chan_lock
);
/* Handle an incoming L2CAP Disconnect Response: the peer acknowledged
 * our disconnect request, so finish tearing down the local channel.
 *
 * NOTE(review): this chunk is a damaged extraction -- several original
 * statements (the length-error return, the !chan early-return path and
 * the function's return value) were lost between the visible lines;
 * the comments below describe only what is visible.
 */
4288 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4289 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
/* Reinterpret the raw command payload as a disconnect response PDU. */
4292 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4294 struct l2cap_chan
*chan
;
/* Reject a packet whose length does not match the fixed-size PDU. */
4296 if (cmd_len
!= sizeof(*rsp
))
/* Both CIDs are little-endian on the wire. */
4299 scid
= __le16_to_cpu(rsp
->scid
);
4300 dcid
= __le16_to_cpu(rsp
->dcid
);
4302 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
/* Look up the local channel under the connection's channel-list lock. */
4304 mutex_lock(&conn
->chan_lock
);
4306 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
/* NOTE(review): this unlock presumably sits on a missing !chan
 * early-return path -- confirm against the full source.
 */
4308 mutex_unlock(&conn
->chan_lock
);
4312 l2cap_chan_lock(chan
);
/* Hold a ref across teardown so ops->close() below sees a live chan. */
4314 l2cap_chan_hold(chan
);
/* err == 0: a locally initiated disconnect completing cleanly. */
4315 l2cap_chan_del(chan
, 0);
4317 l2cap_chan_unlock(chan
);
/* Notify the channel owner and drop the extra reference taken above. */
4319 chan
->ops
->close(chan
);
4320 l2cap_chan_put(chan
);
4322 mutex_unlock(&conn
->chan_lock
);
4327 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4328 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4331 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4334 if (cmd_len
!= sizeof(*req
))
4337 type
= __le16_to_cpu(req
->type
);
4339 BT_DBG("type 0x%4.4x", type
);
4341 if (type
== L2CAP_IT_FEAT_MASK
) {
4343 u32 feat_mask
= l2cap_feat_mask
;
4344 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4345 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4346 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4348 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4350 if (conn
->hs_enabled
)
4351 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4352 | L2CAP_FEAT_EXT_WINDOW
;
4354 put_unaligned_le32(feat_mask
, rsp
->data
);
4355 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4357 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4359 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4361 if (conn
->hs_enabled
)
4362 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4364 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4366 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4367 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
4368 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4369 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4372 struct l2cap_info_rsp rsp
;
4373 rsp
.type
= cpu_to_le16(type
);
4374 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
4375 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4382 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4383 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4386 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4389 if (cmd_len
< sizeof(*rsp
))
4392 type
= __le16_to_cpu(rsp
->type
);
4393 result
= __le16_to_cpu(rsp
->result
);
4395 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4397 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4398 if (cmd
->ident
!= conn
->info_ident
||
4399 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4402 cancel_delayed_work(&conn
->info_timer
);
4404 if (result
!= L2CAP_IR_SUCCESS
) {
4405 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4406 conn
->info_ident
= 0;
4408 l2cap_conn_start(conn
);
4414 case L2CAP_IT_FEAT_MASK
:
4415 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4417 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4418 struct l2cap_info_req req
;
4419 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4421 conn
->info_ident
= l2cap_get_ident(conn
);
4423 l2cap_send_cmd(conn
, conn
->info_ident
,
4424 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4426 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4427 conn
->info_ident
= 0;
4429 l2cap_conn_start(conn
);
4433 case L2CAP_IT_FIXED_CHAN
:
4434 conn
->fixed_chan_mask
= rsp
->data
[0];
4435 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4436 conn
->info_ident
= 0;
4438 l2cap_conn_start(conn
);
4445 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4446 struct l2cap_cmd_hdr
*cmd
,
4447 u16 cmd_len
, void *data
)
4449 struct l2cap_create_chan_req
*req
= data
;
4450 struct l2cap_create_chan_rsp rsp
;
4451 struct l2cap_chan
*chan
;
4452 struct hci_dev
*hdev
;
4455 if (cmd_len
!= sizeof(*req
))
4458 if (!conn
->hs_enabled
)
4461 psm
= le16_to_cpu(req
->psm
);
4462 scid
= le16_to_cpu(req
->scid
);
4464 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4466 /* For controller id 0 make BR/EDR connection */
4467 if (req
->amp_id
== AMP_ID_BREDR
) {
4468 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4473 /* Validate AMP controller id */
4474 hdev
= hci_dev_get(req
->amp_id
);
4478 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4483 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4486 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4487 struct hci_conn
*hs_hcon
;
4489 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4493 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4498 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4500 mgr
->bredr_chan
= chan
;
4501 chan
->hs_hcon
= hs_hcon
;
4502 chan
->fcs
= L2CAP_FCS_NONE
;
4503 conn
->mtu
= hdev
->block_mtu
;
4512 rsp
.scid
= cpu_to_le16(scid
);
4513 rsp
.result
= cpu_to_le16(L2CAP_CR_BAD_AMP
);
4514 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4516 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4522 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4524 struct l2cap_move_chan_req req
;
4527 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4529 ident
= l2cap_get_ident(chan
->conn
);
4530 chan
->ident
= ident
;
4532 req
.icid
= cpu_to_le16(chan
->scid
);
4533 req
.dest_amp_id
= dest_amp_id
;
4535 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4538 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4541 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4543 struct l2cap_move_chan_rsp rsp
;
4545 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4547 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4548 rsp
.result
= cpu_to_le16(result
);
4550 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4554 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4556 struct l2cap_move_chan_cfm cfm
;
4558 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4560 chan
->ident
= l2cap_get_ident(chan
->conn
);
4562 cfm
.icid
= cpu_to_le16(chan
->scid
);
4563 cfm
.result
= cpu_to_le16(result
);
4565 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4568 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4571 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4573 struct l2cap_move_chan_cfm cfm
;
4575 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4577 cfm
.icid
= cpu_to_le16(icid
);
4578 cfm
.result
= cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4580 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4584 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4587 struct l2cap_move_chan_cfm_rsp rsp
;
4589 BT_DBG("icid 0x%4.4x", icid
);
4591 rsp
.icid
= cpu_to_le16(icid
);
4592 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4595 static void __release_logical_link(struct l2cap_chan
*chan
)
4597 chan
->hs_hchan
= NULL
;
4598 chan
->hs_hcon
= NULL
;
4600 /* Placeholder - release the logical link */
4603 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4605 /* Logical link setup failed */
4606 if (chan
->state
!= BT_CONNECTED
) {
4607 /* Create channel failure, disconnect */
4608 l2cap_send_disconn_req(chan
, ECONNRESET
);
4612 switch (chan
->move_role
) {
4613 case L2CAP_MOVE_ROLE_RESPONDER
:
4614 l2cap_move_done(chan
);
4615 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4617 case L2CAP_MOVE_ROLE_INITIATOR
:
4618 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4619 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4620 /* Remote has only sent pending or
4621 * success responses, clean up
4623 l2cap_move_done(chan
);
4626 /* Other amp move states imply that the move
4627 * has already aborted
4629 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4634 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4635 struct hci_chan
*hchan
)
4637 struct l2cap_conf_rsp rsp
;
4639 chan
->hs_hchan
= hchan
;
4640 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4642 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4644 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4647 set_default_fcs(chan
);
4649 err
= l2cap_ertm_init(chan
);
4651 l2cap_send_disconn_req(chan
, -err
);
4653 l2cap_chan_ready(chan
);
4657 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4658 struct hci_chan
*hchan
)
4660 chan
->hs_hcon
= hchan
->conn
;
4661 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4663 BT_DBG("move_state %d", chan
->move_state
);
4665 switch (chan
->move_state
) {
4666 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4667 /* Move confirm will be sent after a success
4668 * response is received
4670 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4672 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4673 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4674 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4675 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4676 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4677 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4678 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4679 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4680 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4684 /* Move was not in expected state, free the channel */
4685 __release_logical_link(chan
);
4687 chan
->move_state
= L2CAP_MOVE_STABLE
;
4691 /* Call with chan locked */
4692 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4695 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4698 l2cap_logical_fail(chan
);
4699 __release_logical_link(chan
);
4703 if (chan
->state
!= BT_CONNECTED
) {
4704 /* Ignore logical link if channel is on BR/EDR */
4705 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4706 l2cap_logical_finish_create(chan
, hchan
);
4708 l2cap_logical_finish_move(chan
, hchan
);
4712 void l2cap_move_start(struct l2cap_chan
*chan
)
4714 BT_DBG("chan %p", chan
);
4716 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4717 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4719 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4720 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4721 /* Placeholder - start physical link setup */
4723 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4724 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4726 l2cap_move_setup(chan
);
4727 l2cap_send_move_chan_req(chan
, 0);
4731 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4732 u8 local_amp_id
, u8 remote_amp_id
)
4734 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4735 local_amp_id
, remote_amp_id
);
4737 chan
->fcs
= L2CAP_FCS_NONE
;
4739 /* Outgoing channel on AMP */
4740 if (chan
->state
== BT_CONNECT
) {
4741 if (result
== L2CAP_CR_SUCCESS
) {
4742 chan
->local_amp_id
= local_amp_id
;
4743 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4745 /* Revert to BR/EDR connect */
4746 l2cap_send_conn_req(chan
);
4752 /* Incoming channel on AMP */
4753 if (__l2cap_no_conn_pending(chan
)) {
4754 struct l2cap_conn_rsp rsp
;
4756 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4757 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4759 if (result
== L2CAP_CR_SUCCESS
) {
4760 /* Send successful response */
4761 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4762 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4764 /* Send negative response */
4765 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4766 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4769 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4772 if (result
== L2CAP_CR_SUCCESS
) {
4773 l2cap_state_change(chan
, BT_CONFIG
);
4774 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4775 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4777 l2cap_build_conf_req(chan
, buf
), buf
);
4778 chan
->num_conf_req
++;
4783 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4786 l2cap_move_setup(chan
);
4787 chan
->move_id
= local_amp_id
;
4788 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4790 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4793 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4795 struct hci_chan
*hchan
= NULL
;
4797 /* Placeholder - get hci_chan for logical link */
4800 if (hchan
->state
== BT_CONNECTED
) {
4801 /* Logical link is ready to go */
4802 chan
->hs_hcon
= hchan
->conn
;
4803 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4804 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4805 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4807 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4809 /* Wait for logical link to be ready */
4810 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4813 /* Logical link not available */
4814 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4818 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4820 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4822 if (result
== -EINVAL
)
4823 rsp_result
= L2CAP_MR_BAD_ID
;
4825 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4827 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4830 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4831 chan
->move_state
= L2CAP_MOVE_STABLE
;
4833 /* Restart data transmission */
4834 l2cap_ertm_send(chan
);
4837 /* Invoke with locked chan */
4838 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4840 u8 local_amp_id
= chan
->local_amp_id
;
4841 u8 remote_amp_id
= chan
->remote_amp_id
;
4843 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4844 chan
, result
, local_amp_id
, remote_amp_id
);
4846 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4847 l2cap_chan_unlock(chan
);
4851 if (chan
->state
!= BT_CONNECTED
) {
4852 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4853 } else if (result
!= L2CAP_MR_SUCCESS
) {
4854 l2cap_do_move_cancel(chan
, result
);
4856 switch (chan
->move_role
) {
4857 case L2CAP_MOVE_ROLE_INITIATOR
:
4858 l2cap_do_move_initiate(chan
, local_amp_id
,
4861 case L2CAP_MOVE_ROLE_RESPONDER
:
4862 l2cap_do_move_respond(chan
, result
);
4865 l2cap_do_move_cancel(chan
, result
);
4871 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4872 struct l2cap_cmd_hdr
*cmd
,
4873 u16 cmd_len
, void *data
)
4875 struct l2cap_move_chan_req
*req
= data
;
4876 struct l2cap_move_chan_rsp rsp
;
4877 struct l2cap_chan
*chan
;
4879 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4881 if (cmd_len
!= sizeof(*req
))
4884 icid
= le16_to_cpu(req
->icid
);
4886 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4888 if (!conn
->hs_enabled
)
4891 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4893 rsp
.icid
= cpu_to_le16(icid
);
4894 rsp
.result
= cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4895 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4900 chan
->ident
= cmd
->ident
;
4902 if (chan
->scid
< L2CAP_CID_DYN_START
||
4903 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4904 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4905 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4906 result
= L2CAP_MR_NOT_ALLOWED
;
4907 goto send_move_response
;
4910 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4911 result
= L2CAP_MR_SAME_ID
;
4912 goto send_move_response
;
4915 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4916 struct hci_dev
*hdev
;
4917 hdev
= hci_dev_get(req
->dest_amp_id
);
4918 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4919 !test_bit(HCI_UP
, &hdev
->flags
)) {
4923 result
= L2CAP_MR_BAD_ID
;
4924 goto send_move_response
;
4929 /* Detect a move collision. Only send a collision response
4930 * if this side has "lost", otherwise proceed with the move.
4931 * The winner has the larger bd_addr.
4933 if ((__chan_is_moving(chan
) ||
4934 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4935 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4936 result
= L2CAP_MR_COLLISION
;
4937 goto send_move_response
;
4940 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4941 l2cap_move_setup(chan
);
4942 chan
->move_id
= req
->dest_amp_id
;
4945 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4946 /* Moving to BR/EDR */
4947 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4948 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4949 result
= L2CAP_MR_PEND
;
4951 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4952 result
= L2CAP_MR_SUCCESS
;
4955 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4956 /* Placeholder - uncomment when amp functions are available */
4957 /*amp_accept_physical(chan, req->dest_amp_id);*/
4958 result
= L2CAP_MR_PEND
;
4962 l2cap_send_move_chan_rsp(chan
, result
);
4964 l2cap_chan_unlock(chan
);
4969 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4971 struct l2cap_chan
*chan
;
4972 struct hci_chan
*hchan
= NULL
;
4974 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4976 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4980 __clear_chan_timer(chan
);
4981 if (result
== L2CAP_MR_PEND
)
4982 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4984 switch (chan
->move_state
) {
4985 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4986 /* Move confirm will be sent when logical link
4989 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4991 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4992 if (result
== L2CAP_MR_PEND
) {
4994 } else if (test_bit(CONN_LOCAL_BUSY
,
4995 &chan
->conn_state
)) {
4996 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4998 /* Logical link is up or moving to BR/EDR,
5001 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
5002 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5005 case L2CAP_MOVE_WAIT_RSP
:
5007 if (result
== L2CAP_MR_SUCCESS
) {
5008 /* Remote is ready, send confirm immediately
5009 * after logical link is ready
5011 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5013 /* Both logical link and move success
5014 * are required to confirm
5016 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5019 /* Placeholder - get hci_chan for logical link */
5021 /* Logical link not available */
5022 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5026 /* If the logical link is not yet connected, do not
5027 * send confirmation.
5029 if (hchan
->state
!= BT_CONNECTED
)
5032 /* Logical link is already ready to go */
5034 chan
->hs_hcon
= hchan
->conn
;
5035 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5037 if (result
== L2CAP_MR_SUCCESS
) {
5038 /* Can confirm now */
5039 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5041 /* Now only need move success
5044 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5047 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5050 /* Any other amp move state means the move failed. */
5051 chan
->move_id
= chan
->local_amp_id
;
5052 l2cap_move_done(chan
);
5053 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5056 l2cap_chan_unlock(chan
);
5059 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5062 struct l2cap_chan
*chan
;
5064 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5066 /* Could not locate channel, icid is best guess */
5067 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5071 __clear_chan_timer(chan
);
5073 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5074 if (result
== L2CAP_MR_COLLISION
) {
5075 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5077 /* Cleanup - cancel move */
5078 chan
->move_id
= chan
->local_amp_id
;
5079 l2cap_move_done(chan
);
5083 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5085 l2cap_chan_unlock(chan
);
5088 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5089 struct l2cap_cmd_hdr
*cmd
,
5090 u16 cmd_len
, void *data
)
5092 struct l2cap_move_chan_rsp
*rsp
= data
;
5095 if (cmd_len
!= sizeof(*rsp
))
5098 icid
= le16_to_cpu(rsp
->icid
);
5099 result
= le16_to_cpu(rsp
->result
);
5101 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5103 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5104 l2cap_move_continue(conn
, icid
, result
);
5106 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5111 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5112 struct l2cap_cmd_hdr
*cmd
,
5113 u16 cmd_len
, void *data
)
5115 struct l2cap_move_chan_cfm
*cfm
= data
;
5116 struct l2cap_chan
*chan
;
5119 if (cmd_len
!= sizeof(*cfm
))
5122 icid
= le16_to_cpu(cfm
->icid
);
5123 result
= le16_to_cpu(cfm
->result
);
5125 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5127 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5129 /* Spec requires a response even if the icid was not found */
5130 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5134 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5135 if (result
== L2CAP_MC_CONFIRMED
) {
5136 chan
->local_amp_id
= chan
->move_id
;
5137 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5138 __release_logical_link(chan
);
5140 chan
->move_id
= chan
->local_amp_id
;
5143 l2cap_move_done(chan
);
5146 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5148 l2cap_chan_unlock(chan
);
5153 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5154 struct l2cap_cmd_hdr
*cmd
,
5155 u16 cmd_len
, void *data
)
5157 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5158 struct l2cap_chan
*chan
;
5161 if (cmd_len
!= sizeof(*rsp
))
5164 icid
= le16_to_cpu(rsp
->icid
);
5166 BT_DBG("icid 0x%4.4x", icid
);
5168 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5172 __clear_chan_timer(chan
);
5174 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5175 chan
->local_amp_id
= chan
->move_id
;
5177 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5178 __release_logical_link(chan
);
5180 l2cap_move_done(chan
);
5183 l2cap_chan_unlock(chan
);
5188 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
5193 if (min
> max
|| min
< 6 || max
> 3200)
5196 if (to_multiplier
< 10 || to_multiplier
> 3200)
5199 if (max
>= to_multiplier
* 8)
5202 max_latency
= (to_multiplier
* 8 / max
) - 1;
5203 if (latency
> 499 || latency
> max_latency
)
5209 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5210 struct l2cap_cmd_hdr
*cmd
,
5211 u16 cmd_len
, u8
*data
)
5213 struct hci_conn
*hcon
= conn
->hcon
;
5214 struct l2cap_conn_param_update_req
*req
;
5215 struct l2cap_conn_param_update_rsp rsp
;
5216 u16 min
, max
, latency
, to_multiplier
;
5219 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
5222 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5225 req
= (struct l2cap_conn_param_update_req
*) data
;
5226 min
= __le16_to_cpu(req
->min
);
5227 max
= __le16_to_cpu(req
->max
);
5228 latency
= __le16_to_cpu(req
->latency
);
5229 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5231 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5232 min
, max
, latency
, to_multiplier
);
5234 memset(&rsp
, 0, sizeof(rsp
));
5236 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
5238 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5240 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5242 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5246 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5251 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5252 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5255 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5256 u16 dcid
, mtu
, mps
, credits
, result
;
5257 struct l2cap_chan
*chan
;
5260 if (cmd_len
< sizeof(*rsp
))
5263 dcid
= __le16_to_cpu(rsp
->dcid
);
5264 mtu
= __le16_to_cpu(rsp
->mtu
);
5265 mps
= __le16_to_cpu(rsp
->mps
);
5266 credits
= __le16_to_cpu(rsp
->credits
);
5267 result
= __le16_to_cpu(rsp
->result
);
5269 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5272 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5273 dcid
, mtu
, mps
, credits
, result
);
5275 mutex_lock(&conn
->chan_lock
);
5277 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5285 l2cap_chan_lock(chan
);
5288 case L2CAP_CR_SUCCESS
:
5292 chan
->remote_mps
= mps
;
5293 chan
->tx_credits
= credits
;
5294 l2cap_chan_ready(chan
);
5298 l2cap_chan_del(chan
, ECONNREFUSED
);
5302 l2cap_chan_unlock(chan
);
5305 mutex_unlock(&conn
->chan_lock
);
5310 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5311 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5316 switch (cmd
->code
) {
5317 case L2CAP_COMMAND_REJ
:
5318 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5321 case L2CAP_CONN_REQ
:
5322 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5325 case L2CAP_CONN_RSP
:
5326 case L2CAP_CREATE_CHAN_RSP
:
5327 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5330 case L2CAP_CONF_REQ
:
5331 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5334 case L2CAP_CONF_RSP
:
5335 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5338 case L2CAP_DISCONN_REQ
:
5339 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5342 case L2CAP_DISCONN_RSP
:
5343 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5346 case L2CAP_ECHO_REQ
:
5347 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5350 case L2CAP_ECHO_RSP
:
5353 case L2CAP_INFO_REQ
:
5354 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5357 case L2CAP_INFO_RSP
:
5358 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5361 case L2CAP_CREATE_CHAN_REQ
:
5362 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5365 case L2CAP_MOVE_CHAN_REQ
:
5366 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5369 case L2CAP_MOVE_CHAN_RSP
:
5370 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5373 case L2CAP_MOVE_CHAN_CFM
:
5374 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5377 case L2CAP_MOVE_CHAN_CFM_RSP
:
5378 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5382 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5390 static int l2cap_le_connect_req(struct l2cap_conn
*conn
,
5391 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5394 struct l2cap_le_conn_req
*req
= (struct l2cap_le_conn_req
*) data
;
5395 struct l2cap_le_conn_rsp rsp
;
5396 struct l2cap_chan
*chan
, *pchan
;
5397 u16 dcid
, scid
, credits
, mtu
, mps
;
5401 if (cmd_len
!= sizeof(*req
))
5404 scid
= __le16_to_cpu(req
->scid
);
5405 mtu
= __le16_to_cpu(req
->mtu
);
5406 mps
= __le16_to_cpu(req
->mps
);
5411 if (mtu
< 23 || mps
< 23)
5414 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm
),
5417 /* Check if we have socket listening on psm */
5418 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
5419 &conn
->hcon
->dst
, LE_LINK
);
5421 result
= L2CAP_CR_BAD_PSM
;
5426 mutex_lock(&conn
->chan_lock
);
5427 l2cap_chan_lock(pchan
);
5429 if (!smp_sufficient_security(conn
->hcon
, pchan
->sec_level
)) {
5430 result
= L2CAP_CR_AUTHENTICATION
;
5432 goto response_unlock
;
5435 /* Check if we already have channel with that dcid */
5436 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
5437 result
= L2CAP_CR_NO_MEM
;
5439 goto response_unlock
;
5442 chan
= pchan
->ops
->new_connection(pchan
);
5444 result
= L2CAP_CR_NO_MEM
;
5445 goto response_unlock
;
5448 l2cap_le_flowctl_init(chan
);
5450 bacpy(&chan
->src
, &conn
->hcon
->src
);
5451 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
5452 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
5453 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
5457 chan
->remote_mps
= mps
;
5458 chan
->tx_credits
= __le16_to_cpu(req
->credits
);
5460 __l2cap_chan_add(conn
, chan
);
5462 credits
= chan
->rx_credits
;
5464 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
5466 chan
->ident
= cmd
->ident
;
5468 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
5469 l2cap_state_change(chan
, BT_CONNECT2
);
5470 result
= L2CAP_CR_PEND
;
5471 chan
->ops
->defer(chan
);
5473 l2cap_chan_ready(chan
);
5474 result
= L2CAP_CR_SUCCESS
;
5478 l2cap_chan_unlock(pchan
);
5479 mutex_unlock(&conn
->chan_lock
);
5481 if (result
== L2CAP_CR_PEND
)
5486 rsp
.mtu
= cpu_to_le16(chan
->imtu
);
5487 rsp
.mps
= cpu_to_le16(chan
->mps
);
5493 rsp
.dcid
= cpu_to_le16(dcid
);
5494 rsp
.credits
= cpu_to_le16(credits
);
5495 rsp
.result
= cpu_to_le16(result
);
5497 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_LE_CONN_RSP
, sizeof(rsp
), &rsp
);
5502 static inline int l2cap_le_credits(struct l2cap_conn
*conn
,
5503 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5506 struct l2cap_le_credits
*pkt
;
5507 struct l2cap_chan
*chan
;
5508 u16 cid
, credits
, max_credits
;
5510 if (cmd_len
!= sizeof(*pkt
))
5513 pkt
= (struct l2cap_le_credits
*) data
;
5514 cid
= __le16_to_cpu(pkt
->cid
);
5515 credits
= __le16_to_cpu(pkt
->credits
);
5517 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid
, credits
);
5519 chan
= l2cap_get_chan_by_dcid(conn
, cid
);
5523 max_credits
= LE_FLOWCTL_MAX_CREDITS
- chan
->tx_credits
;
5524 if (credits
> max_credits
) {
5525 BT_ERR("LE credits overflow");
5526 l2cap_send_disconn_req(chan
, ECONNRESET
);
5528 /* Return 0 so that we don't trigger an unnecessary
5529 * command reject packet.
5534 chan
->tx_credits
+= credits
;
5536 while (chan
->tx_credits
&& !skb_queue_empty(&chan
->tx_q
)) {
5537 l2cap_do_send(chan
, skb_dequeue(&chan
->tx_q
));
5541 if (chan
->tx_credits
)
5542 chan
->ops
->resume(chan
);
5544 l2cap_chan_unlock(chan
);
/* Handle an LE signalling Command Reject: the peer refused one of our
 * pending requests, so tear down the channel that was waiting on it.
 *
 * NOTE(review): damaged extraction -- the length-error return, the
 * !chan bailout and the function's return statement were lost between
 * the visible lines; comments describe only what is visible.
 */
5549 static inline int l2cap_le_command_rej(struct l2cap_conn
*conn
,
5550 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
/* The payload is an "unknown command" reject structure. */
5553 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
5554 struct l2cap_chan
*chan
;
/* Drop truncated packets. */
5556 if (cmd_len
< sizeof(*rej
))
5559 mutex_lock(&conn
->chan_lock
);
/* Find the channel whose pending request carried this identifier. */
5561 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5565 l2cap_chan_lock(chan
);
/* Peer rejected the request: fail the channel as connection refused. */
5566 l2cap_chan_del(chan
, ECONNREFUSED
);
5567 l2cap_chan_unlock(chan
);
5570 mutex_unlock(&conn
->chan_lock
);
5574 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5575 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5580 switch (cmd
->code
) {
5581 case L2CAP_COMMAND_REJ
:
5582 l2cap_le_command_rej(conn
, cmd
, cmd_len
, data
);
5585 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5586 err
= l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5589 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5592 case L2CAP_LE_CONN_RSP
:
5593 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5596 case L2CAP_LE_CONN_REQ
:
5597 err
= l2cap_le_connect_req(conn
, cmd
, cmd_len
, data
);
5600 case L2CAP_LE_CREDITS
:
5601 err
= l2cap_le_credits(conn
, cmd
, cmd_len
, data
);
5604 case L2CAP_DISCONN_REQ
:
5605 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5608 case L2CAP_DISCONN_RSP
:
5609 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5613 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5621 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5622 struct sk_buff
*skb
)
5624 struct hci_conn
*hcon
= conn
->hcon
;
5625 struct l2cap_cmd_hdr
*cmd
;
5629 if (hcon
->type
!= LE_LINK
)
5632 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5635 cmd
= (void *) skb
->data
;
5636 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5638 len
= le16_to_cpu(cmd
->len
);
5640 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5642 if (len
!= skb
->len
|| !cmd
->ident
) {
5643 BT_DBG("corrupted command");
5647 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5649 struct l2cap_cmd_rej_unk rej
;
5651 BT_ERR("Wrong link type (%d)", err
);
5653 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5654 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5662 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5663 struct sk_buff
*skb
)
5665 struct hci_conn
*hcon
= conn
->hcon
;
5666 u8
*data
= skb
->data
;
5668 struct l2cap_cmd_hdr cmd
;
5671 l2cap_raw_recv(conn
, skb
);
5673 if (hcon
->type
!= ACL_LINK
)
5676 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5678 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5679 data
+= L2CAP_CMD_HDR_SIZE
;
5680 len
-= L2CAP_CMD_HDR_SIZE
;
5682 cmd_len
= le16_to_cpu(cmd
.len
);
5684 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5687 if (cmd_len
> len
|| !cmd
.ident
) {
5688 BT_DBG("corrupted command");
5692 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5694 struct l2cap_cmd_rej_unk rej
;
5696 BT_ERR("Wrong link type (%d)", err
);
5698 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5699 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5711 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5713 u16 our_fcs
, rcv_fcs
;
5716 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5717 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5719 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5721 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5722 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5723 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5724 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5726 if (our_fcs
!= rcv_fcs
)
5732 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5734 struct l2cap_ctrl control
;
5736 BT_DBG("chan %p", chan
);
5738 memset(&control
, 0, sizeof(control
));
5741 control
.reqseq
= chan
->buffer_seq
;
5742 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5744 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5745 control
.super
= L2CAP_SUPER_RNR
;
5746 l2cap_send_sframe(chan
, &control
);
5749 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5750 chan
->unacked_frames
> 0)
5751 __set_retrans_timer(chan
);
5753 /* Send pending iframes */
5754 l2cap_ertm_send(chan
);
5756 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5757 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5758 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5761 control
.super
= L2CAP_SUPER_RR
;
5762 l2cap_send_sframe(chan
, &control
);
5766 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5767 struct sk_buff
**last_frag
)
5769 /* skb->len reflects data in skb as well as all fragments
5770 * skb->data_len reflects only data in fragments
5772 if (!skb_has_frag_list(skb
))
5773 skb_shinfo(skb
)->frag_list
= new_frag
;
5775 new_frag
->next
= NULL
;
5777 (*last_frag
)->next
= new_frag
;
5778 *last_frag
= new_frag
;
5780 skb
->len
+= new_frag
->len
;
5781 skb
->data_len
+= new_frag
->len
;
5782 skb
->truesize
+= new_frag
->truesize
;
5785 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5786 struct l2cap_ctrl
*control
)
5790 switch (control
->sar
) {
5791 case L2CAP_SAR_UNSEGMENTED
:
5795 err
= chan
->ops
->recv(chan
, skb
);
5798 case L2CAP_SAR_START
:
5802 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5803 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5805 if (chan
->sdu_len
> chan
->imtu
) {
5810 if (skb
->len
>= chan
->sdu_len
)
5814 chan
->sdu_last_frag
= skb
;
5820 case L2CAP_SAR_CONTINUE
:
5824 append_skb_frag(chan
->sdu
, skb
,
5825 &chan
->sdu_last_frag
);
5828 if (chan
->sdu
->len
>= chan
->sdu_len
)
5838 append_skb_frag(chan
->sdu
, skb
,
5839 &chan
->sdu_last_frag
);
5842 if (chan
->sdu
->len
!= chan
->sdu_len
)
5845 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5848 /* Reassembly complete */
5850 chan
->sdu_last_frag
= NULL
;
5858 kfree_skb(chan
->sdu
);
5860 chan
->sdu_last_frag
= NULL
;
5867 static int l2cap_resegment(struct l2cap_chan
*chan
)
5873 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5877 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5880 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5881 l2cap_tx(chan
, NULL
, NULL
, event
);
5884 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5887 /* Pass sequential frames to l2cap_reassemble_sdu()
5888 * until a gap is encountered.
5891 BT_DBG("chan %p", chan
);
5893 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5894 struct sk_buff
*skb
;
5895 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5896 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5898 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5903 skb_unlink(skb
, &chan
->srej_q
);
5904 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5905 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5910 if (skb_queue_empty(&chan
->srej_q
)) {
5911 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5912 l2cap_send_ack(chan
);
5918 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5919 struct l2cap_ctrl
*control
)
5921 struct sk_buff
*skb
;
5923 BT_DBG("chan %p, control %p", chan
, control
);
5925 if (control
->reqseq
== chan
->next_tx_seq
) {
5926 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5927 l2cap_send_disconn_req(chan
, ECONNRESET
);
5931 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5934 BT_DBG("Seq %d not available for retransmission",
5939 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5940 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5941 l2cap_send_disconn_req(chan
, ECONNRESET
);
5945 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5947 if (control
->poll
) {
5948 l2cap_pass_to_tx(chan
, control
);
5950 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5951 l2cap_retransmit(chan
, control
);
5952 l2cap_ertm_send(chan
);
5954 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5955 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5956 chan
->srej_save_reqseq
= control
->reqseq
;
5959 l2cap_pass_to_tx_fbit(chan
, control
);
5961 if (control
->final
) {
5962 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5963 !test_and_clear_bit(CONN_SREJ_ACT
,
5965 l2cap_retransmit(chan
, control
);
5967 l2cap_retransmit(chan
, control
);
5968 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5969 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5970 chan
->srej_save_reqseq
= control
->reqseq
;
5976 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5977 struct l2cap_ctrl
*control
)
5979 struct sk_buff
*skb
;
5981 BT_DBG("chan %p, control %p", chan
, control
);
5983 if (control
->reqseq
== chan
->next_tx_seq
) {
5984 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5985 l2cap_send_disconn_req(chan
, ECONNRESET
);
5989 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5991 if (chan
->max_tx
&& skb
&&
5992 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5993 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5994 l2cap_send_disconn_req(chan
, ECONNRESET
);
5998 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6000 l2cap_pass_to_tx(chan
, control
);
6002 if (control
->final
) {
6003 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
6004 l2cap_retransmit_all(chan
, control
);
6006 l2cap_retransmit_all(chan
, control
);
6007 l2cap_ertm_send(chan
);
6008 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
6009 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
6013 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
6015 BT_DBG("chan %p, txseq %d", chan
, txseq
);
6017 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
6018 chan
->expected_tx_seq
);
6020 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
6021 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6023 /* See notes below regarding "double poll" and
6026 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6027 BT_DBG("Invalid/Ignore - after SREJ");
6028 return L2CAP_TXSEQ_INVALID_IGNORE
;
6030 BT_DBG("Invalid - in window after SREJ sent");
6031 return L2CAP_TXSEQ_INVALID
;
6035 if (chan
->srej_list
.head
== txseq
) {
6036 BT_DBG("Expected SREJ");
6037 return L2CAP_TXSEQ_EXPECTED_SREJ
;
6040 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
6041 BT_DBG("Duplicate SREJ - txseq already stored");
6042 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
6045 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
6046 BT_DBG("Unexpected SREJ - not requested");
6047 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
6051 if (chan
->expected_tx_seq
== txseq
) {
6052 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
6054 BT_DBG("Invalid - txseq outside tx window");
6055 return L2CAP_TXSEQ_INVALID
;
6058 return L2CAP_TXSEQ_EXPECTED
;
6062 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
6063 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
6064 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6065 return L2CAP_TXSEQ_DUPLICATE
;
6068 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
6069 /* A source of invalid packets is a "double poll" condition,
6070 * where delays cause us to send multiple poll packets. If
6071 * the remote stack receives and processes both polls,
6072 * sequence numbers can wrap around in such a way that a
6073 * resent frame has a sequence number that looks like new data
6074 * with a sequence gap. This would trigger an erroneous SREJ
6077 * Fortunately, this is impossible with a tx window that's
6078 * less than half of the maximum sequence number, which allows
6079 * invalid frames to be safely ignored.
6081 * With tx window sizes greater than half of the tx window
6082 * maximum, the frame is invalid and cannot be ignored. This
6083 * causes a disconnect.
6086 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
6087 BT_DBG("Invalid/Ignore - txseq outside tx window");
6088 return L2CAP_TXSEQ_INVALID_IGNORE
;
6090 BT_DBG("Invalid - txseq outside tx window");
6091 return L2CAP_TXSEQ_INVALID
;
6094 BT_DBG("Unexpected - txseq indicates missing frames");
6095 return L2CAP_TXSEQ_UNEXPECTED
;
6099 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
6100 struct l2cap_ctrl
*control
,
6101 struct sk_buff
*skb
, u8 event
)
6104 bool skb_in_use
= false;
6106 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6110 case L2CAP_EV_RECV_IFRAME
:
6111 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
6112 case L2CAP_TXSEQ_EXPECTED
:
6113 l2cap_pass_to_tx(chan
, control
);
6115 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6116 BT_DBG("Busy, discarding expected seq %d",
6121 chan
->expected_tx_seq
= __next_seq(chan
,
6124 chan
->buffer_seq
= chan
->expected_tx_seq
;
6127 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
6131 if (control
->final
) {
6132 if (!test_and_clear_bit(CONN_REJ_ACT
,
6133 &chan
->conn_state
)) {
6135 l2cap_retransmit_all(chan
, control
);
6136 l2cap_ertm_send(chan
);
6140 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
6141 l2cap_send_ack(chan
);
6143 case L2CAP_TXSEQ_UNEXPECTED
:
6144 l2cap_pass_to_tx(chan
, control
);
6146 /* Can't issue SREJ frames in the local busy state.
6147 * Drop this frame, it will be seen as missing
6148 * when local busy is exited.
6150 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
6151 BT_DBG("Busy, discarding unexpected seq %d",
6156 /* There was a gap in the sequence, so an SREJ
6157 * must be sent for each missing frame. The
6158 * current frame is stored for later use.
6160 skb_queue_tail(&chan
->srej_q
, skb
);
6162 BT_DBG("Queued %p (queue len %d)", skb
,
6163 skb_queue_len(&chan
->srej_q
));
6165 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
6166 l2cap_seq_list_clear(&chan
->srej_list
);
6167 l2cap_send_srej(chan
, control
->txseq
);
6169 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
6171 case L2CAP_TXSEQ_DUPLICATE
:
6172 l2cap_pass_to_tx(chan
, control
);
6174 case L2CAP_TXSEQ_INVALID_IGNORE
:
6176 case L2CAP_TXSEQ_INVALID
:
6178 l2cap_send_disconn_req(chan
, ECONNRESET
);
6182 case L2CAP_EV_RECV_RR
:
6183 l2cap_pass_to_tx(chan
, control
);
6184 if (control
->final
) {
6185 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6187 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
6188 !__chan_is_moving(chan
)) {
6190 l2cap_retransmit_all(chan
, control
);
6193 l2cap_ertm_send(chan
);
6194 } else if (control
->poll
) {
6195 l2cap_send_i_or_rr_or_rnr(chan
);
6197 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6198 &chan
->conn_state
) &&
6199 chan
->unacked_frames
)
6200 __set_retrans_timer(chan
);
6202 l2cap_ertm_send(chan
);
6205 case L2CAP_EV_RECV_RNR
:
6206 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6207 l2cap_pass_to_tx(chan
, control
);
6208 if (control
&& control
->poll
) {
6209 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6210 l2cap_send_rr_or_rnr(chan
, 0);
6212 __clear_retrans_timer(chan
);
6213 l2cap_seq_list_clear(&chan
->retrans_list
);
6215 case L2CAP_EV_RECV_REJ
:
6216 l2cap_handle_rej(chan
, control
);
6218 case L2CAP_EV_RECV_SREJ
:
6219 l2cap_handle_srej(chan
, control
);
6225 if (skb
&& !skb_in_use
) {
6226 BT_DBG("Freeing %p", skb
);
6233 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6234 struct l2cap_ctrl
*control
,
6235 struct sk_buff
*skb
, u8 event
)
6238 u16 txseq
= control
->txseq
;
6239 bool skb_in_use
= false;
6241 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6245 case L2CAP_EV_RECV_IFRAME
:
6246 switch (l2cap_classify_txseq(chan
, txseq
)) {
6247 case L2CAP_TXSEQ_EXPECTED
:
6248 /* Keep frame for reassembly later */
6249 l2cap_pass_to_tx(chan
, control
);
6250 skb_queue_tail(&chan
->srej_q
, skb
);
6252 BT_DBG("Queued %p (queue len %d)", skb
,
6253 skb_queue_len(&chan
->srej_q
));
6255 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6257 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6258 l2cap_seq_list_pop(&chan
->srej_list
);
6260 l2cap_pass_to_tx(chan
, control
);
6261 skb_queue_tail(&chan
->srej_q
, skb
);
6263 BT_DBG("Queued %p (queue len %d)", skb
,
6264 skb_queue_len(&chan
->srej_q
));
6266 err
= l2cap_rx_queued_iframes(chan
);
6271 case L2CAP_TXSEQ_UNEXPECTED
:
6272 /* Got a frame that can't be reassembled yet.
6273 * Save it for later, and send SREJs to cover
6274 * the missing frames.
6276 skb_queue_tail(&chan
->srej_q
, skb
);
6278 BT_DBG("Queued %p (queue len %d)", skb
,
6279 skb_queue_len(&chan
->srej_q
));
6281 l2cap_pass_to_tx(chan
, control
);
6282 l2cap_send_srej(chan
, control
->txseq
);
6284 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6285 /* This frame was requested with an SREJ, but
6286 * some expected retransmitted frames are
6287 * missing. Request retransmission of missing
6290 skb_queue_tail(&chan
->srej_q
, skb
);
6292 BT_DBG("Queued %p (queue len %d)", skb
,
6293 skb_queue_len(&chan
->srej_q
));
6295 l2cap_pass_to_tx(chan
, control
);
6296 l2cap_send_srej_list(chan
, control
->txseq
);
6298 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6299 /* We've already queued this frame. Drop this copy. */
6300 l2cap_pass_to_tx(chan
, control
);
6302 case L2CAP_TXSEQ_DUPLICATE
:
6303 /* Expecting a later sequence number, so this frame
6304 * was already received. Ignore it completely.
6307 case L2CAP_TXSEQ_INVALID_IGNORE
:
6309 case L2CAP_TXSEQ_INVALID
:
6311 l2cap_send_disconn_req(chan
, ECONNRESET
);
6315 case L2CAP_EV_RECV_RR
:
6316 l2cap_pass_to_tx(chan
, control
);
6317 if (control
->final
) {
6318 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6320 if (!test_and_clear_bit(CONN_REJ_ACT
,
6321 &chan
->conn_state
)) {
6323 l2cap_retransmit_all(chan
, control
);
6326 l2cap_ertm_send(chan
);
6327 } else if (control
->poll
) {
6328 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6329 &chan
->conn_state
) &&
6330 chan
->unacked_frames
) {
6331 __set_retrans_timer(chan
);
6334 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6335 l2cap_send_srej_tail(chan
);
6337 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6338 &chan
->conn_state
) &&
6339 chan
->unacked_frames
)
6340 __set_retrans_timer(chan
);
6342 l2cap_send_ack(chan
);
6345 case L2CAP_EV_RECV_RNR
:
6346 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6347 l2cap_pass_to_tx(chan
, control
);
6348 if (control
->poll
) {
6349 l2cap_send_srej_tail(chan
);
6351 struct l2cap_ctrl rr_control
;
6352 memset(&rr_control
, 0, sizeof(rr_control
));
6353 rr_control
.sframe
= 1;
6354 rr_control
.super
= L2CAP_SUPER_RR
;
6355 rr_control
.reqseq
= chan
->buffer_seq
;
6356 l2cap_send_sframe(chan
, &rr_control
);
6360 case L2CAP_EV_RECV_REJ
:
6361 l2cap_handle_rej(chan
, control
);
6363 case L2CAP_EV_RECV_SREJ
:
6364 l2cap_handle_srej(chan
, control
);
6368 if (skb
&& !skb_in_use
) {
6369 BT_DBG("Freeing %p", skb
);
6376 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6378 BT_DBG("chan %p", chan
);
6380 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6383 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6385 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6387 return l2cap_resegment(chan
);
6390 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6391 struct l2cap_ctrl
*control
,
6392 struct sk_buff
*skb
, u8 event
)
6396 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6402 l2cap_process_reqseq(chan
, control
->reqseq
);
6404 if (!skb_queue_empty(&chan
->tx_q
))
6405 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6407 chan
->tx_send_head
= NULL
;
6409 /* Rewind next_tx_seq to the point expected
6412 chan
->next_tx_seq
= control
->reqseq
;
6413 chan
->unacked_frames
= 0;
6415 err
= l2cap_finish_move(chan
);
6419 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6420 l2cap_send_i_or_rr_or_rnr(chan
);
6422 if (event
== L2CAP_EV_RECV_IFRAME
)
6425 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6428 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6429 struct l2cap_ctrl
*control
,
6430 struct sk_buff
*skb
, u8 event
)
6434 if (!control
->final
)
6437 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6439 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6440 l2cap_process_reqseq(chan
, control
->reqseq
);
6442 if (!skb_queue_empty(&chan
->tx_q
))
6443 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6445 chan
->tx_send_head
= NULL
;
6447 /* Rewind next_tx_seq to the point expected
6450 chan
->next_tx_seq
= control
->reqseq
;
6451 chan
->unacked_frames
= 0;
6454 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6456 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6458 err
= l2cap_resegment(chan
);
6461 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6466 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6468 /* Make sure reqseq is for a packet that has been sent but not acked */
6471 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6472 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6475 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6476 struct sk_buff
*skb
, u8 event
)
6480 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6481 control
, skb
, event
, chan
->rx_state
);
6483 if (__valid_reqseq(chan
, control
->reqseq
)) {
6484 switch (chan
->rx_state
) {
6485 case L2CAP_RX_STATE_RECV
:
6486 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6488 case L2CAP_RX_STATE_SREJ_SENT
:
6489 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6492 case L2CAP_RX_STATE_WAIT_P
:
6493 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6495 case L2CAP_RX_STATE_WAIT_F
:
6496 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6503 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6504 control
->reqseq
, chan
->next_tx_seq
,
6505 chan
->expected_ack_seq
);
6506 l2cap_send_disconn_req(chan
, ECONNRESET
);
6512 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6513 struct sk_buff
*skb
)
6517 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6520 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6521 L2CAP_TXSEQ_EXPECTED
) {
6522 l2cap_pass_to_tx(chan
, control
);
6524 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6525 __next_seq(chan
, chan
->buffer_seq
));
6527 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6529 l2cap_reassemble_sdu(chan
, skb
, control
);
6532 kfree_skb(chan
->sdu
);
6535 chan
->sdu_last_frag
= NULL
;
6539 BT_DBG("Freeing %p", skb
);
6544 chan
->last_acked_seq
= control
->txseq
;
6545 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6550 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6552 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6556 __unpack_control(chan
, skb
);
6561 * We can just drop the corrupted I-frame here.
6562 * Receiver will miss it and start proper recovery
6563 * procedures and ask for retransmission.
6565 if (l2cap_check_fcs(chan
, skb
))
6568 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6569 len
-= L2CAP_SDULEN_SIZE
;
6571 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6572 len
-= L2CAP_FCS_SIZE
;
6574 if (len
> chan
->mps
) {
6575 l2cap_send_disconn_req(chan
, ECONNRESET
);
6579 if (!control
->sframe
) {
6582 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6583 control
->sar
, control
->reqseq
, control
->final
,
6586 /* Validate F-bit - F=0 always valid, F=1 only
6587 * valid in TX WAIT_F
6589 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6592 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6593 event
= L2CAP_EV_RECV_IFRAME
;
6594 err
= l2cap_rx(chan
, control
, skb
, event
);
6596 err
= l2cap_stream_rx(chan
, control
, skb
);
6600 l2cap_send_disconn_req(chan
, ECONNRESET
);
6602 const u8 rx_func_to_event
[4] = {
6603 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6604 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6607 /* Only I-frames are expected in streaming mode */
6608 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6611 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6612 control
->reqseq
, control
->final
, control
->poll
,
6616 BT_ERR("Trailing bytes: %d in sframe", len
);
6617 l2cap_send_disconn_req(chan
, ECONNRESET
);
6621 /* Validate F and P bits */
6622 if (control
->final
&& (control
->poll
||
6623 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6626 event
= rx_func_to_event
[control
->super
];
6627 if (l2cap_rx(chan
, control
, skb
, event
))
6628 l2cap_send_disconn_req(chan
, ECONNRESET
);
6638 static void l2cap_chan_le_send_credits(struct l2cap_chan
*chan
)
6640 struct l2cap_conn
*conn
= chan
->conn
;
6641 struct l2cap_le_credits pkt
;
6644 /* We return more credits to the sender only after the amount of
6645 * credits falls below half of the initial amount.
6647 if (chan
->rx_credits
>= (le_max_credits
+ 1) / 2)
6650 return_credits
= le_max_credits
- chan
->rx_credits
;
6652 BT_DBG("chan %p returning %u credits to sender", chan
, return_credits
);
6654 chan
->rx_credits
+= return_credits
;
6656 pkt
.cid
= cpu_to_le16(chan
->scid
);
6657 pkt
.credits
= cpu_to_le16(return_credits
);
6659 chan
->ident
= l2cap_get_ident(conn
);
6661 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CREDITS
, sizeof(pkt
), &pkt
);
6664 static int l2cap_le_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6668 if (!chan
->rx_credits
) {
6669 BT_ERR("No credits to receive LE L2CAP data");
6670 l2cap_send_disconn_req(chan
, ECONNRESET
);
6674 if (chan
->imtu
< skb
->len
) {
6675 BT_ERR("Too big LE L2CAP PDU");
6680 BT_DBG("rx_credits %u -> %u", chan
->rx_credits
+ 1, chan
->rx_credits
);
6682 l2cap_chan_le_send_credits(chan
);
6689 sdu_len
= get_unaligned_le16(skb
->data
);
6690 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
6692 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6693 sdu_len
, skb
->len
, chan
->imtu
);
6695 if (sdu_len
> chan
->imtu
) {
6696 BT_ERR("Too big LE L2CAP SDU length received");
6701 if (skb
->len
> sdu_len
) {
6702 BT_ERR("Too much LE L2CAP data received");
6707 if (skb
->len
== sdu_len
)
6708 return chan
->ops
->recv(chan
, skb
);
6711 chan
->sdu_len
= sdu_len
;
6712 chan
->sdu_last_frag
= skb
;
6717 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6718 chan
->sdu
->len
, skb
->len
, chan
->sdu_len
);
6720 if (chan
->sdu
->len
+ skb
->len
> chan
->sdu_len
) {
6721 BT_ERR("Too much LE L2CAP data received");
6726 append_skb_frag(chan
->sdu
, skb
, &chan
->sdu_last_frag
);
6729 if (chan
->sdu
->len
== chan
->sdu_len
) {
6730 err
= chan
->ops
->recv(chan
, chan
->sdu
);
6733 chan
->sdu_last_frag
= NULL
;
6741 kfree_skb(chan
->sdu
);
6743 chan
->sdu_last_frag
= NULL
;
6747 /* We can't return an error here since we took care of the skb
6748 * freeing internally. An error return would cause the caller to
6749 * do a double-free of the skb.
6754 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6755 struct sk_buff
*skb
)
6757 struct l2cap_chan
*chan
;
6759 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6761 if (cid
== L2CAP_CID_A2MP
) {
6762 chan
= a2mp_channel_create(conn
, skb
);
6768 l2cap_chan_lock(chan
);
6770 BT_DBG("unknown cid 0x%4.4x", cid
);
6771 /* Drop packet and return */
6777 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6779 if (chan
->state
!= BT_CONNECTED
)
6782 switch (chan
->mode
) {
6783 case L2CAP_MODE_LE_FLOWCTL
:
6784 if (l2cap_le_data_rcv(chan
, skb
) < 0)
6789 case L2CAP_MODE_BASIC
:
6790 /* If socket recv buffers overflows we drop data here
6791 * which is *bad* because L2CAP has to be reliable.
6792 * But we don't have any other choice. L2CAP doesn't
6793 * provide flow control mechanism. */
6795 if (chan
->imtu
< skb
->len
) {
6796 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6800 if (!chan
->ops
->recv(chan
, skb
))
6804 case L2CAP_MODE_ERTM
:
6805 case L2CAP_MODE_STREAMING
:
6806 l2cap_data_rcv(chan
, skb
);
6810 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6818 l2cap_chan_unlock(chan
);
6821 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6822 struct sk_buff
*skb
)
6824 struct hci_conn
*hcon
= conn
->hcon
;
6825 struct l2cap_chan
*chan
;
6827 if (hcon
->type
!= ACL_LINK
)
6830 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6835 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6837 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6840 if (chan
->imtu
< skb
->len
)
6843 /* Store remote BD_ADDR and PSM for msg_name */
6844 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6845 bt_cb(skb
)->psm
= psm
;
6847 if (!chan
->ops
->recv(chan
, skb
))
6854 static void l2cap_att_channel(struct l2cap_conn
*conn
,
6855 struct sk_buff
*skb
)
6857 struct hci_conn
*hcon
= conn
->hcon
;
6858 struct l2cap_chan
*chan
;
6860 if (hcon
->type
!= LE_LINK
)
6863 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
6864 &hcon
->src
, &hcon
->dst
);
6868 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6870 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
))
6873 if (chan
->imtu
< skb
->len
)
6876 if (!chan
->ops
->recv(chan
, skb
))
6883 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6885 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6886 struct hci_conn
*hcon
= conn
->hcon
;
6890 if (hcon
->state
!= BT_CONNECTED
) {
6891 BT_DBG("queueing pending rx skb");
6892 skb_queue_tail(&conn
->pending_rx
, skb
);
6896 skb_pull(skb
, L2CAP_HDR_SIZE
);
6897 cid
= __le16_to_cpu(lh
->cid
);
6898 len
= __le16_to_cpu(lh
->len
);
6900 if (len
!= skb
->len
) {
6905 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6908 case L2CAP_CID_SIGNALING
:
6909 l2cap_sig_channel(conn
, skb
);
6912 case L2CAP_CID_CONN_LESS
:
6913 psm
= get_unaligned((__le16
*) skb
->data
);
6914 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6915 l2cap_conless_channel(conn
, psm
, skb
);
6919 l2cap_att_channel(conn
, skb
);
6922 case L2CAP_CID_LE_SIGNALING
:
6923 l2cap_le_sig_channel(conn
, skb
);
6927 if (smp_sig_channel(conn
, skb
))
6928 l2cap_conn_del(conn
->hcon
, EACCES
);
6931 case L2CAP_FC_6LOWPAN
:
6932 bt_6lowpan_recv(conn
, skb
);
6936 l2cap_data_channel(conn
, cid
, skb
);
6941 static void process_pending_rx(struct work_struct
*work
)
6943 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
6945 struct sk_buff
*skb
;
6949 while ((skb
= skb_dequeue(&conn
->pending_rx
)))
6950 l2cap_recv_frame(conn
, skb
);
6953 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
6955 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6956 struct hci_chan
*hchan
;
6961 hchan
= hci_chan_create(hcon
);
6965 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
6967 hci_chan_del(hchan
);
6971 kref_init(&conn
->ref
);
6972 hcon
->l2cap_data
= conn
;
6974 hci_conn_get(conn
->hcon
);
6975 conn
->hchan
= hchan
;
6977 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
6979 switch (hcon
->type
) {
6981 if (hcon
->hdev
->le_mtu
) {
6982 conn
->mtu
= hcon
->hdev
->le_mtu
;
6987 conn
->mtu
= hcon
->hdev
->acl_mtu
;
6991 conn
->feat_mask
= 0;
6993 if (hcon
->type
== ACL_LINK
)
6994 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
6995 &hcon
->hdev
->dev_flags
);
6997 spin_lock_init(&conn
->lock
);
6998 mutex_init(&conn
->chan_lock
);
7000 INIT_LIST_HEAD(&conn
->chan_l
);
7001 INIT_LIST_HEAD(&conn
->users
);
7003 if (hcon
->type
== LE_LINK
)
7004 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
7006 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
7008 skb_queue_head_init(&conn
->pending_rx
);
7009 INIT_WORK(&conn
->pending_rx_work
, process_pending_rx
);
7011 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
7016 static bool is_valid_psm(u16 psm
, u8 dst_type
) {
7020 if (bdaddr_type_is_le(dst_type
))
7021 return (psm
<= 0x00ff);
7023 /* PSM must be odd and lsb of upper byte must be 0 */
7024 return ((psm
& 0x0101) == 0x0001);
/*
 * l2cap_chan_connect() - initiate an outgoing connection on @chan to
 * @dst (identified by @psm for connection-oriented channels or @cid for
 * fixed channels).
 *
 * Resolves the route to a local controller, validates the PSM/CID and
 * channel mode, establishes (or reuses) the underlying HCI link, binds
 * the channel to the resulting l2cap_conn and kicks off the L2CAP
 * connect state machine.  Returns 0 or a negative errno.
 *
 * NOTE(review): many interior lines were dropped by the extraction
 * (numbering jumps such as 7032 -> 7036, 7039 -> 7041, 7064 -> 7066,
 * 7079 -> 7083).  Error gotos, hci_dev_lock/unlock pairing, the
 * auth_type declaration and the chan->psm/chan->dcid assignments are
 * not visible -- verify against the full source before relying on the
 * exact control flow.
 */
7027 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
7028 bdaddr_t
*dst
, u8 dst_type
)
7030 struct l2cap_conn
*conn
;
7031 struct hci_conn
*hcon
;
7032 struct hci_dev
*hdev
;
7036 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
7037 dst_type
, __le16_to_cpu(psm
));
/* Pick the local controller that routes to @dst; fail if none. */
7039 hdev
= hci_get_route(dst
, &chan
->src
);
7041 return -EHOSTUNREACH
;
7045 l2cap_chan_lock(chan
);
/* Parameter validation: raw channels bypass the PSM check; a
 * connection-oriented channel needs a PSM, a fixed channel needs a CID. */
7047 if (!is_valid_psm(__le16_to_cpu(psm
), dst_type
) && !cid
&&
7048 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
7053 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !psm
) {
7058 if (chan
->chan_type
== L2CAP_CHAN_FIXED
&& !cid
) {
/* Mode-specific setup; ERTM/streaming cases are truncated here. */
7063 switch (chan
->mode
) {
7064 case L2CAP_MODE_BASIC
:
7066 case L2CAP_MODE_LE_FLOWCTL
:
7067 l2cap_le_flowctl_init(chan
);
7069 case L2CAP_MODE_ERTM
:
7070 case L2CAP_MODE_STREAMING
:
/* Reject a channel already in progress or established. */
7079 switch (chan
->state
) {
7083 /* Already connecting */
7088 /* Already connected */
7102 /* Set destination address and psm */
7103 bacpy(&chan
->dst
, dst
);
7104 chan
->dst_type
= dst_type
;
7109 auth_type
= l2cap_get_auth_type(chan
);
/* LE destinations: map the L2CAP bdaddr type onto the HCI one, then
 * create an LE link; BR/EDR destinations get an ACL link. */
7111 if (bdaddr_type_is_le(dst_type
)) {
7112 /* Convert from L2CAP channel address type to HCI address type
7114 if (dst_type
== BDADDR_LE_PUBLIC
)
7115 dst_type
= ADDR_LE_DEV_PUBLIC
;
7117 dst_type
= ADDR_LE_DEV_RANDOM
;
7119 hcon
= hci_connect_le(hdev
, dst
, dst_type
, chan
->sec_level
,
7122 hcon
= hci_connect_acl(hdev
, dst
, chan
->sec_level
, auth_type
);
7126 err
= PTR_ERR(hcon
);
7130 conn
= l2cap_conn_add(hcon
);
7132 hci_conn_drop(hcon
);
/* A caller-chosen destination CID must not already be in use. */
7137 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
7138 hci_conn_drop(hcon
);
7143 /* Update source addr of the socket */
7144 bacpy(&chan
->src
, &hcon
->src
);
7145 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
/* l2cap_chan_add() takes conn->chan_lock, so drop the channel lock
 * around it to respect lock ordering. */
7147 l2cap_chan_unlock(chan
);
7148 l2cap_chan_add(conn
, chan
);
7149 l2cap_chan_lock(chan
);
7151 /* l2cap_chan_add takes its own ref so we can drop this one */
7152 hci_conn_drop(hcon
);
7154 l2cap_state_change(chan
, BT_CONNECT
);
7155 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
7157 /* Release chan->sport so that it can be reused by other
7158 * sockets (as it's only used for listening sockets).
7160 write_lock(&chan_list_lock
);
7162 write_unlock(&chan_list_lock
);
/* If the HCI link is already up, short-circuit the state machine:
 * non-connection-oriented channels go straight to BT_CONNECTED once
 * security passes; otherwise start the L2CAP connect procedure. */
7164 if (hcon
->state
== BT_CONNECTED
) {
7165 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
7166 __clear_chan_timer(chan
);
7167 if (l2cap_chan_check_security(chan
))
7168 l2cap_state_change(chan
, BT_CONNECTED
);
7170 l2cap_do_start(chan
);
7176 l2cap_chan_unlock(chan
);
7177 hci_dev_unlock(hdev
);
7182 /* ---- L2CAP interface with lower layer (HCI) ---- */
7184 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
7186 int exact
= 0, lm1
= 0, lm2
= 0;
7187 struct l2cap_chan
*c
;
7189 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
7191 /* Find listening sockets and check their link_mode */
7192 read_lock(&chan_list_lock
);
7193 list_for_each_entry(c
, &chan_list
, global_l
) {
7194 if (c
->state
!= BT_LISTEN
)
7197 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
7198 lm1
|= HCI_LM_ACCEPT
;
7199 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7200 lm1
|= HCI_LM_MASTER
;
7202 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
7203 lm2
|= HCI_LM_ACCEPT
;
7204 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
7205 lm2
|= HCI_LM_MASTER
;
7208 read_unlock(&chan_list_lock
);
7210 return exact
? lm1
: lm2
;
7213 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
7215 struct l2cap_conn
*conn
;
7217 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
7220 conn
= l2cap_conn_add(hcon
);
7222 l2cap_conn_ready(conn
);
7224 l2cap_conn_del(hcon
, bt_to_errno(status
));
7228 int l2cap_disconn_ind(struct hci_conn
*hcon
)
7230 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7232 BT_DBG("hcon %p", hcon
);
7235 return HCI_ERROR_REMOTE_USER_TERM
;
7236 return conn
->disc_reason
;
7239 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
7241 BT_DBG("hcon %p reason %d", hcon
, reason
);
7243 bt_6lowpan_del_conn(hcon
->l2cap_data
);
7245 l2cap_conn_del(hcon
, bt_to_errno(reason
));
7248 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
7250 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
7253 if (encrypt
== 0x00) {
7254 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
7255 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
7256 } else if (chan
->sec_level
== BT_SECURITY_HIGH
||
7257 chan
->sec_level
== BT_SECURITY_FIPS
)
7258 l2cap_chan_close(chan
, ECONNREFUSED
);
7260 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
7261 __clear_chan_timer(chan
);
/*
 * l2cap_security_cfm() - HCI callback: authentication/encryption on
 * @hcon completed with @status; @encrypt reports the new encryption
 * state.
 *
 * Walks every channel on the connection under conn->chan_lock and
 * advances each one's state machine according to the security result:
 * ready ATT channels, resume established channels, continue or abort
 * in-progress connects, and answer deferred incoming connects with a
 * connect response.
 *
 * NOTE(review): the extraction dropped interior lines (numbering jumps
 * such as 7268 -> 7273, 7278 -> 7281, 7290 -> 7294, 7313 -> 7317,
 * 7347 -> 7350) -- "continue" statements, else branches, the
 * res/stat/buf declarations and the early !conn return are not visible.
 * Verify against the full source.
 */
7265 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
7267 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7268 struct l2cap_chan
*chan
;
7273 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
/* LE links: successful encryption triggers SMP key distribution and
 * in any case stops the pending security timeout. */
7275 if (hcon
->type
== LE_LINK
) {
7276 if (!status
&& encrypt
)
7277 smp_distribute_keys(conn
);
7278 cancel_delayed_work(&conn
->security_timer
);
7281 mutex_lock(&conn
->chan_lock
);
7283 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
7284 l2cap_chan_lock(chan
);
7286 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
7287 state_to_string(chan
->state
));
/* A2MP channels manage their own security; skip them. */
7289 if (chan
->scid
== L2CAP_CID_A2MP
) {
7290 l2cap_chan_unlock(chan
);
/* ATT fixed channel: successful encryption promotes the channel's
 * security level and marks it ready. */
7294 if (chan
->scid
== L2CAP_CID_ATT
) {
7295 if (!status
&& encrypt
) {
7296 chan
->sec_level
= hcon
->sec_level
;
7297 l2cap_chan_ready(chan
);
7300 l2cap_chan_unlock(chan
);
/* Channels without a pending connect request need no action here. */
7304 if (!__l2cap_no_conn_pending(chan
)) {
7305 l2cap_chan_unlock(chan
);
/* Established/configuring channels just resume data flow and let
 * l2cap_check_encryption() apply the security policy. */
7309 if (!status
&& (chan
->state
== BT_CONNECTED
||
7310 chan
->state
== BT_CONFIG
)) {
7311 chan
->ops
->resume(chan
);
7312 l2cap_check_encryption(chan
, encrypt
);
7313 l2cap_chan_unlock(chan
);
/* Outgoing connect waiting on security: proceed on success,
 * otherwise arm the disconnect timer. */
7317 if (chan
->state
== BT_CONNECT
) {
7319 l2cap_start_connection(chan
);
7321 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
/* Incoming connect (BT_CONNECT2): build and send a connect response
 * whose result depends on the security outcome and DEFER_SETUP. */
7322 } else if (chan
->state
== BT_CONNECT2
) {
7323 struct l2cap_conn_rsp rsp
;
7327 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
7328 res
= L2CAP_CR_PEND
;
7329 stat
= L2CAP_CS_AUTHOR_PEND
;
7330 chan
->ops
->defer(chan
);
7332 l2cap_state_change(chan
, BT_CONFIG
);
7333 res
= L2CAP_CR_SUCCESS
;
7334 stat
= L2CAP_CS_NO_INFO
;
/* Security failure: block the connect and schedule disconnect. */
7337 l2cap_state_change(chan
, BT_DISCONN
);
7338 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
7339 res
= L2CAP_CR_SEC_BLOCK
;
7340 stat
= L2CAP_CS_NO_INFO
;
/* Note the swap: our dcid is the peer's scid and vice versa. */
7343 rsp
.scid
= cpu_to_le16(chan
->dcid
);
7344 rsp
.dcid
= cpu_to_le16(chan
->scid
);
7345 rsp
.result
= cpu_to_le16(res
);
7346 rsp
.status
= cpu_to_le16(stat
);
7347 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
/* On success, immediately follow up with the first config request. */
7350 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
7351 res
== L2CAP_CR_SUCCESS
) {
7353 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
7354 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
7356 l2cap_build_conf_req(chan
, buf
),
7358 chan
->num_conf_req
++;
7362 l2cap_chan_unlock(chan
);
7365 mutex_unlock(&conn
->chan_lock
);
/*
 * l2cap_recv_acldata() - entry point for ACL data arriving from HCI.
 *
 * Reassembles L2CAP frames that were fragmented across multiple ACL
 * packets.  A start fragment (@flags) carries the Basic L2CAP header,
 * from which the total frame length is read; continuation fragments
 * are appended to conn->rx_skb until conn->rx_len reaches zero, at
 * which point the complete frame is handed to l2cap_recv_frame().
 * Malformed sequences mark the connection unreliable (ECOMM).
 *
 * NOTE(review): the extraction dropped interior lines (numbering jumps
 * such as 7377 -> 7381, 7386 -> 7390, 7403 -> 7407, 7426 -> 7430) --
 * the switch header, goto-drop paths, allocation-failure handling and
 * the final return are not visible.  Verify against the full source.
 */
7370 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
7372 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
7373 struct l2cap_hdr
*hdr
;
7376 /* For AMP controller do not create l2cap conn */
7377 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
7381 conn
= l2cap_conn_add(hcon
);
7386 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
/* Start fragment: any leftover partial frame is a protocol error. */
7390 case ACL_START_NO_FLUSH
:
7393 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
7394 kfree_skb(conn
->rx_skb
);
7395 conn
->rx_skb
= NULL
;
7397 l2cap_conn_unreliable(conn
, ECOMM
);
7400 /* Start fragment always begin with Basic L2CAP header */
7401 if (skb
->len
< L2CAP_HDR_SIZE
) {
7402 BT_ERR("Frame is too short (len %d)", skb
->len
);
7403 l2cap_conn_unreliable(conn
, ECOMM
);
/* Total frame length = payload length from header + header size. */
7407 hdr
= (struct l2cap_hdr
*) skb
->data
;
7408 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
7410 if (len
== skb
->len
) {
7411 /* Complete frame received */
7412 l2cap_recv_frame(conn
, skb
);
7416 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
7418 if (skb
->len
> len
) {
7419 BT_ERR("Frame is too long (len %d, expected len %d)",
7421 l2cap_conn_unreliable(conn
, ECOMM
);
7425 /* Allocate skb for the complete frame (with header) */
7426 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
7430 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
/* Remember how many bytes are still missing. */
7432 conn
->rx_len
= len
- skb
->len
;
/* Continuation fragment path. */
7436 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
7438 if (!conn
->rx_len
) {
7439 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
7440 l2cap_conn_unreliable(conn
, ECOMM
);
7444 if (skb
->len
> conn
->rx_len
) {
7445 BT_ERR("Fragment is too long (len %d, expected %d)",
7446 skb
->len
, conn
->rx_len
);
7447 kfree_skb(conn
->rx_skb
);
7448 conn
->rx_skb
= NULL
;
7450 l2cap_conn_unreliable(conn
, ECOMM
);
7454 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
7456 conn
->rx_len
-= skb
->len
;
7458 if (!conn
->rx_len
) {
7459 /* Complete frame received. l2cap_recv_frame
7460 * takes ownership of the skb so set the global
7461 * rx_skb pointer to NULL first.
7463 struct sk_buff
*rx_skb
= conn
->rx_skb
;
7464 conn
->rx_skb
= NULL
;
7465 l2cap_recv_frame(conn
, rx_skb
);
7475 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
7477 struct l2cap_chan
*c
;
7479 read_lock(&chan_list_lock
);
7481 list_for_each_entry(c
, &chan_list
, global_l
) {
7482 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7484 c
->state
, __le16_to_cpu(c
->psm
),
7485 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
7486 c
->sec_level
, c
->mode
);
7489 read_unlock(&chan_list_lock
);
7494 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
7496 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
7499 static const struct file_operations l2cap_debugfs_fops
= {
7500 .open
= l2cap_debugfs_open
,
7502 .llseek
= seq_lseek
,
7503 .release
= single_release
,
/* Handle for the "l2cap" debugfs file; created in l2cap_init() and
 * removed in l2cap_exit(). */
7506 static struct dentry
*l2cap_debugfs
;
7508 int __init
l2cap_init(void)
7512 err
= l2cap_init_sockets();
7516 if (IS_ERR_OR_NULL(bt_debugfs
))
7519 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
7520 NULL
, &l2cap_debugfs_fops
);
7522 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs
,
7524 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs
,
7532 void l2cap_exit(void)
7534 bt_6lowpan_cleanup();
7535 debugfs_remove(l2cap_debugfs
);
7536 l2cap_cleanup_sockets();
/* Module parameter: lets an administrator disable Enhanced
 * Retransmission Mode at load time or via sysfs (mode 0644). */
7539 module_param(disable_ertm
, bool, 0644);
7540 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");